
I am working on a rather large and complex docker-compose.yaml file that orchestrates multiple services in Docker. When I try to run docker-compose up -d, I get the following error: "yaml: unknown anchor 'mayan-container' referenced". I am not sure what is causing this error or how to resolve it.

I have defined an anchor called 'mayan-container' in the x-mayan-container section of my YAML file, and I reference this anchor in several places throughout the file to define services. As far as I can tell, the anchor itself is well defined and contains no typos. Despite this, the error persists.

My understanding is that YAML anchors should be recognised and resolvable anywhere in the file, but something seems to be going wrong here. Could anyone shed light on why this error is occurring and how I can fix it? Is there something else I should check in the YAML file, or in the way I am using these anchors?
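For clarity, this is a simplified sketch of the pattern I am using (hypothetical names, not my real file): a reusable block is anchored once on an x- extension field and then merged into individual services with the <<: merge key.

x-common:
  &common                  # anchor declared on the extension block
  image: alpine:3.18
  restart: unless-stopped

services:
  app:
    <<: *common            # the anchored mapping is merged into the service
    command: sleep infinity

The full file where the error occurs is below.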

version: '3'

x-airflow-common:
  &airflow-common
  # In order to add custom dependencies or upgrade provider packages you can use your extended image.
  # Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml
  # and uncomment the "build" line below, then run `docker-compose build` to build the images.
  image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.3.3}
  # build: .
  environment:
    &airflow-common-env
    AIRFLOW__CORE__EXECUTOR: CeleryExecutor
    AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:${POSTGRES_PASSWORD}@postgres/airflow
    # For backward compatibility with Airflow <2.3
    AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:${POSTGRES_PASSWORD}@postgres/airflow
    AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:${POSTGRES_PASSWORD}@postgres/airflow
    AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0
    AIRFLOW__CORE__FERNET_KEY: ''
    AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true'
    AIRFLOW__CORE__LOAD_EXAMPLES: 'false'
    AIRFLOW__API__AUTH_BACKENDS: 'airflow.api.auth.backend.basic_auth'
    ENVIRONMENT: '${ENVIRONMENT}'
    TENANT_ID: '${TENANT_ID}'
    APPLICATION_ID: '${APPLICATION_ID}'
    SECRET_VALUE: '${SECRET_VALUE}'
    # Modification
    # Pip packages
    _PIP_ADDITIONAL_REQUIREMENTS: ${_PIP_ADDITIONAL_REQUIREMENTS:- azure-storage-file-datalake openpyxl azure.keyvault azure.identity python-dotenv}
  volumes:
    - ./airflow_dags:/opt/airflow/dags
    - ./airflow_logs:/opt/airflow/logs
    - ./airflow_plugins:/opt/airflow/plugins
  user: "${AIRFLOW_UID:-50000}:0"
  depends_on:
    &airflow-common-depends-on
    redis:
      condition: service_healthy
    postgres:
      condition: service_healthy

services:
  postgres:
    container_name: postgresql_airflow
    image: postgres:13
    environment:
      POSTGRES_USER: airflow
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: airflow
    expose:
      - "5432"
    ports:
      - "5433:5432"
    volumes:
      - airflow-db-volume:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "airflow"]
      interval: 5s
      retries: 5
    restart: always

  airflow-webserver:
    container_name: airflow_webserver
    <<: *airflow-common
    command: webserver
    ports:
      - 8080:8080
    healthcheck:
      test: ["CMD", "curl", "--fail", "http://localhost:8080/health"]
      interval: 10s
      timeout: 10s
      retries: 5
    restart: always
    depends_on:
      <<: *airflow-common-depends-on
      airflow-init:
        condition: service_completed_successfully

  airflow-scheduler:
    container_name: airflow_scheduler
    <<: *airflow-common
    command: scheduler
    healthcheck:
      test: ["CMD-SHELL", 'airflow jobs check --job-type SchedulerJob --hostname "$${HOSTNAME}"']
      interval: 10s
      timeout: 10s
      retries: 5
    restart: always
    depends_on:
      <<: *airflow-common-depends-on
      airflow-init:
        condition: service_completed_successfully

  airflow-worker:
    container_name: airflow_worker
    <<: *airflow-common
    command: celery worker
    healthcheck:
      test:
        - "CMD-SHELL"
        - 'celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"'
      interval: 10s
      timeout: 10s
      retries: 5
    environment:
      <<: *airflow-common-env
      # Required to handle warm shutdown of the celery workers properly
      # See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation
      DUMB_INIT_SETSID: "0"
    restart: always
    depends_on:
      <<: *airflow-common-depends-on
      airflow-init:
        condition: service_completed_successfully

  airflow-triggerer:
    container_name: airflow_triggerer
    <<: *airflow-common
    command: triggerer
    healthcheck:
      test: ["CMD-SHELL", 'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"']
      interval: 10s
      timeout: 10s
      retries: 5
    restart: always
    depends_on:
      <<: *airflow-common-depends-on
      airflow-init:
        condition: service_completed_successfully

  airflow-init:
    container_name: airflow_init
    <<: *airflow-common
    entrypoint: /bin/bash
    # yamllint disable rule:line-length
    command:
      - -c
      - |
        function ver() {
          printf "%04d%04d%04d%04d" $${1//./ }
        }
        airflow_version=$$(AIRFLOW__LOGGING__LOGGING_LEVEL=INFO && gosu airflow airflow version)
        airflow_version_comparable=$$(ver $${airflow_version})
        min_airflow_version=2.2.0
        min_airflow_version_comparable=$$(ver $${min_airflow_version})
        if (( airflow_version_comparable < min_airflow_version_comparable )); then
          echo
          echo -e "\033[1;31mERROR!!!: Too old Airflow version $${airflow_version}!\e[0m"
          echo "The minimum Airflow version supported: $${min_airflow_version}. Only use this or higher!"
          echo
          exit 1
        fi
        if [[ -z "${AIRFLOW_UID}" ]]; then
          echo
          echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m"
          echo "If you are on Linux, you SHOULD follow the instructions below to set "
          echo "AIRFLOW_UID environment variable, otherwise files will be owned by root."
          echo "For other operating systems you can get rid of the warning with manually created .env file:"
          echo "    See: https://airflow.apache.org/docs/apache-airflow/stable/start/docker.html#setting-the-right-airflow-user"
          echo
        fi
        one_meg=1048576
        mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg))
        cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat)
        disk_available=$$(df / | tail -1 | awk '{print $$4}')
        warning_resources="false"
        if (( mem_available < 4000 )) ; then
          echo
          echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m"
          echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))"
          echo
          warning_resources="true"
        fi
        if (( cpus_available < 2 )); then
          echo
          echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m"
          echo "At least 2 CPUs recommended. You have $${cpus_available}"
          echo
          warning_resources="true"
        fi
        if (( disk_available < one_meg * 10 )); then
          echo
          echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m"
          echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))"
          echo
          warning_resources="true"
        fi
        if [[ $${warning_resources} == "true" ]]; then
          echo
          echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m"
          echo "Please follow the instructions to increase amount of resources available:"
          echo "   https://airflow.apache.org/docs/apache-airflow/stable/start/docker.html#before-you-begin"
          echo
        fi
        mkdir -p /sources/logs /sources/dags /sources/plugins
        chown -R "${AIRFLOW_UID}:0" /sources/{logs,dags,plugins}
        exec /entrypoint airflow version
    # yamllint enable rule:line-length
    environment:
      <<: *airflow-common-env
      _AIRFLOW_DB_UPGRADE: 'true'
      _AIRFLOW_WWW_USER_CREATE: 'true'
      _AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-belero-admin}
      _AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-${AIRFLOW_PASSWORD}}
      _PIP_ADDITIONAL_REQUIREMENTS: ''
    user: "0:0"
    volumes:
      - .:/sources

  airflow-cli:
    container_name: airflow_cli
    <<: *airflow-common
    profiles:
      - debug
    environment:
      <<: *airflow-common-env
      CONNECTION_CHECK_MAX_COUNT: "0"
    # Workaround for entrypoint issue. See: https://github.com/apache/airflow/issues/16252
    command:
      - bash
      - -c
      - airflow

  # You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up
  # or by explicitly targeting it on the command line, e.g. docker-compose up flower.
  # See: https://docs.docker.com/compose/profiles/
  flower:
    container_name: airflow_flower
    <<: *airflow-common
    command: celery flower
    profiles:
      - flower
    ports:
      - 5555:5555
    healthcheck:
      test: ["CMD", "curl", "--fail", "http://localhost:5555/"]
      interval: 10s
      timeout: 10s
      retries: 5
    restart: always
    depends_on:
      <<: *airflow-common-depends-on
      airflow-init:
        condition: service_completed_successfully

  djangoappdev:
    container_name: django_app_dev
    build: .
    environment:
      ENVIRONMENT: '${ENVIRONMENT}'
      TENANT_ID: '${TENANT_ID}'
      APPLICATION_ID: '${APPLICATION_ID}'
      SECRET_VALUE: '${SECRET_VALUE}'
    ports:
      # Port where the Django app runs
      - "8001:8001"
    volumes:
      - ./beleroforms:/code/beleroforms
      - ./beleroforms_app:/code/beleroforms_app

  djangodb:
    container_name: django_app_db_dev
    # image: postgres:latest
    image: postgres:14.7
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    environment:
      - "POSTGRES_USER=django"
      - "POSTGRES_PASSWORD=${POSTGRES_PASSWORD_DJANGO}"
      - "POSTGRES_DB=django"
    expose:
      - "5432"
    ports:
      - "5434:5432" 
    app:
    <<: [*mayan-container,*mayan-traefik-labels,*mayan-frontend-ports]
    profiles:
      - all_in_one

  elasticsearch:
    environment:
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      - http.max_content_length=400mb
      - xpack.security.enabled=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - ELASTIC_PASSWORD=${MAYAN_ELASTICSEARCH_PASSWORD:-mayanespassword}
    image: ${MAYAN_DOCKER_ELASTICSEARCH_IMAGE:-elasticsearch}:${MAYAN_DOCKER_ELASTICSEARCH_TAG:-7.17.9}
    networks:
      - mayan
    # Enable to allow external access to the database.
    # ports:
    #  - "9200:9200"
    profiles:
      - elasticsearch
    restart: unless-stopped
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ${MAYAN_ELASTICSEARCH_VOLUME:-elasticsearch}:/usr/share/elasticsearch/data

  keycloak:
    command:
      - start
    environment:
      KEYCLOAK_ADMIN: ${MAYAN_KEYCLOAK_ADMIN:-admin}
      KEYCLOAK_ADMIN_PASSWORD: ${MAYAN_KEYCLOAK_ADMIN_PASSWORD:-admin}
      KC_DB: postgres
      KC_DB_PASSWORD: ${MAYAN_KEYCLOAK_DATABASE_PASSWORD:-keycloakdbpass}
      KC_DB_URL_DATABASE: ${MAYAN_KEYCLOAK_DATABASE_NAME:-keycloak}
      KC_DB_URL_HOST: keycloak-postgres
      KC_DB_USERNAME: ${MAYAN_DATABASE_KEYCLOAK_USER:-keycloak}
      KC_HOSTNAME_URL: http://127.0.0.1:8081/
      KC_HOSTNAME_STRICT: false
      KC_HTTP_ENABLED: true
    image: ${MAYAN_DOCKER_KEYCLOAK_IMAGE:-keycloak/keycloak}:${MAYAN_DOCKER_KEYCLOAK_TAG:-20.0.1}
    labels:
      - "traefik.enable=${MAYAN_TRAEFIK_KEYCLOAK_ENABLE:-false}"
      - "traefik.http.middlewares.keycloak_http_redirect.redirectscheme.scheme=https"
      - "traefik.http.middlewares.keycloak_http_redirect.redirectscheme.permanent=false"
      - "traefik.http.routers.keycloak_http.entrypoints=http"
      - "traefik.http.routers.keycloak_http.middlewares=keycloak_http_redirect"
      - "traefik.http.routers.keycloak_http.rule=Host(`${MAYAN_TRAEFIK_EXTERNAL_DOMAIN}`)"
      - "traefik.http.routers.keycloak_https.entrypoints=https"
      - "traefik.http.routers.keycloak_https.rule=Host(`${MAYAN_TRAEFIK_EXTERNAL_DOMAIN}`)"
      - "traefik.http.routers.keycloak_https.service=keycloak_http"
      - "traefik.http.routers.keycloak_https.tls=true"
      - "traefik.http.routers.keycloak_https.tls.certresolver=letsencrypt"
      - "traefik.http.services.keycloak_http.loadbalancer.server.port=${MAYAN_TRAEFIK_KEYCLOAK_HTTP_PORT:-8081}"
    networks:
      - keycloak
      - mayan
    # Disable ports if using Traefik.
    ports:
      - "${MAYAN_TRAEFIK_KEYCLOAK_HTTP_PORT:-8081}:${MAYAN_TRAEFIK_KEYCLOAK_HTTP_PORT:-8081}"
    profiles:
      - keycloak
    restart: unless-stopped

  keycloak-postgres:
    environment:
      POSTGRES_DB: ${MAYAN_KEYCLOAK_DATABASE_NAME:-keycloak}
      POSTGRES_PASSWORD: ${MAYAN_KEYCLOAK_DATABASE_PASSWORD:-keycloakdbpass}
      POSTGRES_USER: ${MAYAN_DATABASE_KEYCLOAK_USER:-keycloak}
    image: postgres:${MAYAN_DOCKER_KEYCLOAK_POSTGRES_TAG:-13.8-alpine}
    networks:
      - keycloak
    profiles:
      - keycloak_postgresql
    restart: unless-stopped
    volumes:
      - ${MAYAN_KEYCLOAK_POSTGRES_VOLUME:-keycloak-postgres}:/var/lib/postgresql/data

  postgresql:
    command:
      - "postgres"
      - "-c"
      - "checkpoint_completion_target=0.6"
      - "-c"
      - "default_statistics_target=200"
      - "-c"
      - "maintenance_work_mem=128MB"
      - "-c"
      - "max_connections=150"
      - "-c"
      - "shared_buffers=256MB"
      - "-c"
      - "work_mem=8MB"
    environment:
      POSTGRES_DB: ${MAYAN_DATABASE_NAME:-mayan}
      POSTGRES_PASSWORD: ${MAYAN_DATABASE_PASSWORD:-mayandbpass}
      POSTGRES_USER: ${MAYAN_DATABASE_USER:-mayan}
    image: ${MAYAN_DOCKER_POSTGRES_IMAGE:-postgres}:${MAYAN_DOCKER_POSTGRES_TAG:-13.10-alpine}
    networks:
      - mayan
    # Enable to allow external access to the database.
    # ports:
    #  - "5432:5432"
    profiles:
      - postgresql
    restart: unless-stopped
    volumes:
      - ${MAYAN_POSTGRES_VOLUME:-postgres}:/var/lib/postgresql/data

  redis:
    command:
      - redis-server
      - --appendonly
      - "no"
      - --databases
      - "3"
      - --maxmemory
      - "100mb"
      - --maxclients
      - "500"
      - --maxmemory-policy
      - "allkeys-lru"
      - --save
      - ""
      - --tcp-backlog
      - "256"
      - --requirepass
      - "${MAYAN_REDIS_PASSWORD:-mayanredispassword}"
    image: ${MAYAN_DOCKER_REDIS_IMAGE:-redis}:${MAYAN_DOCKER_REDIS_TAG:-7.0.10-alpine}
    networks:
      - mayan
    profiles:
      - redis
    restart: unless-stopped
    volumes:
      - ${MAYAN_REDIS_VOLUME:-redis}:/data

  # Run a frontend gunicorn container
  frontend:
    <<: [*mayan-container,*mayan-traefik-labels,*mayan-frontend-ports]
    command:
      - run_frontend
    profiles:
      - extra_frontend

  # Enable to run standalone workers
  mountindex:
    <<: *mayan-container
    cap_add:
      - SYS_ADMIN
    devices:
      - "/dev/fuse:/dev/fuse"
    entrypoint:
      - /bin/sh
      - -c
      - 'mkdir --parents /mnt/index && chown mayan:mayan /mnt/index && /usr/local/bin/entrypoint.sh run_command "mirroring_mount_index --allow-other creation_date /mnt/index"'  # Replace "creation_date" with the index of your choice.
    profiles:
      - mountindex
    security_opt:
      - apparmor:unconfined
    volumes:
      - type: bind
        source: /mnt/mayan_indexes/creation_date  # Host location where the index will show up.
        target: /mnt/index  # Location inside the container where the index will be mounted. Must be the same as in the "entrypoint" section.
        bind:
          propagation: shared

  # Run a separate class A worker
  worker_a:
    <<: *mayan-container
    command:
      - run_worker
      - worker_a
      - "--prefetch-multiplier=1"
    profiles:
      - extra_worker_a

  # Run a separate class B worker
  worker_b:
    <<: *mayan-container
    command:
      - run_worker
      - worker_b
      - "--prefetch-multiplier=1"
    profiles:
      - extra_worker_b

  # Run a separate class C worker
  worker_c:
    <<: *mayan-container
    command:
      - run_worker
      - worker_c
      - "--prefetch-multiplier=1"
    profiles:
      - extra_worker_c

  # Run a separate class D worker
  worker_d:
    <<: *mayan-container
    command:
      - run_worker
      - worker_d
      - "--concurrency=1 --prefetch-multiplier=1"
    profiles:
      - extra_worker_d

  worker_custom_queue:
    <<: *mayan-container
    command:
      - /bin/sh
      - -c
      - 'MAYAN_QUEUE_LIST=${MAYAN_WORKER_CUSTOM_QUEUE_LIST} /usr/local/bin/run_worker.sh --prefetch-multiplier=1'
    profiles:
      - extra_worker_custom

  # Run a separate Celery beat container
  celery_beat:
    <<: *mayan-container
    command:
      - run_celery
      - "beat --pidfile= --loglevel=ERROR"
    profiles:
      - extra_celery_beat

  setup_or_upgrade:
    <<: *mayan-container
    command:
      - run_initial_setup_or_perform_upgrade
    profiles:
      - extra_setup_or_upgrade
    restart: "no"

  rabbitmq:
    image: ${MAYAN_DOCKER_RABBITMQ_IMAGE:-rabbitmq}:${MAYAN_DOCKER_RABBITMQ_TAG:-3.11.13-management-alpine}
    environment:
      RABBITMQ_DEFAULT_USER: ${MAYAN_RABBITMQ_USER:-mayan}
      RABBITMQ_DEFAULT_PASS: ${MAYAN_RABBITMQ_PASSWORD:-mayanrabbitpass}
      RABBITMQ_DEFAULT_VHOST: ${MAYAN_RABBITMQ_VHOST:-mayan}
    labels:
      - "traefik.enable=${MAYAN_TRAEFIK_RABBITMQ_ENABLE:-false}"
      - "traefik.http.routers.rabbitmq_admin_http.entrypoints=rabbitmq_admin_http"
      - "traefik.http.routers.rabbitmq_admin_http.rule=Host(`${MAYAN_TRAEFIK_EXTERNAL_DOMAIN}`)"
      - "traefik.http.routers.rabbitmq_admin_http.service=rabbitmq_admin_http"
      - "traefik.http.routers.rabbitmq_admin_http.tls=true"
      - "traefik.http.routers.rabbitmq_admin_http.tls.certresolver=letsencrypt"
      - "traefik.http.services.rabbitmq_admin_http.loadbalancer.server.port=15672"
    networks:
      - mayan
    # Enable to allow access to the administration interface.
    # ports:
    #   - "${MAYAN_RABBITMQ_ADMIN_PORT:-15672}:15672"
    profiles:
      - rabbitmq
    restart: unless-stopped
    volumes:
      - ${MAYAN_RABBITMQ_VOLUME:-rabbitmq}:/var/lib/rabbitmq

  traefik:
    container_name: "traefik"
    command:
      # - "--log.level=DEBUG"
      - "--api.dashboard=true"
      - "--api.insecure=${MAYAN_TRAEFIK_API_INSECURE:-false}"
      - "--certificatesresolvers.letsencrypt.acme.caserver=${MAYAN_TRAEFIK_LETS_ENCRYPT_SERVER:-https://acme-staging-v02.api.letsencrypt.org/directory}"
      - "--certificatesresolvers.letsencrypt.acme.dnschallenge=${MAYAN_TRAEFIK_LETS_ENCRYPT_DNS_CHALLENGE:-false}"
      - "--certificatesresolvers.letsencrypt.acme.dnschallenge.provider=${MAYAN_TRAEFIK_LETS_ENCRYPT_DNS_CHALLENGE_PROVIDER}"
      - "--certificatesresolvers.letsencrypt.acme.email=${MAYAN_TRAEFIK_LETS_ENCRYPT_EMAIL}"
      - "--certificatesresolvers.letsencrypt.acme.storage=/traefik-certificates-letsencrypt/acme.json"
      - "--certificatesresolvers.letsencrypt.acme.tlschallenge=${MAYAN_TRAEFIK_LETS_ENCRYPT_TLS_CHALLENGE:-false}"
      - "--entrypoints.http.address=:80"
      - "--entrypoints.https.address=:443"
      - "--entrypoints.keycloak_http.address=:${MAYAN_TRAEFIK_KEYCLOAK_HTTP_PORT:-8081}"
      - "--entrypoints.rabbitmq_admin_http.address=:15672"
      - "--entrypoints.traefik_dashboard_http.address=:8080"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
    # - Add DNS provider variables (https://doc.traefik.io/traefik/https/acme/#providers)
    # environment:
    image: ${MAYAN_DOCKER_TRAEFIK_IMAGE:-traefik}:${MAYAN_DOCKER_TRAEFIK_TAG:-v2.5}
    labels:
      - "traefik.enable=${MAYAN_TRAEFIK_DASHBOARD_ENABLE:-false}"
      - "traefik.http.middlewares.basic-auth-global.basicauth.users=${MAYAN_TRAEFIK_DASHBOARD_AUTHENTICATION}"
      - "traefik.http.routers.traefik_https.entrypoints=traefik_dashboard_http"
      - "traefik.http.routers.traefik_https.middlewares=basic-auth-global"
      - "traefik.http.routers.traefik_https.rule=Host(`${MAYAN_TRAEFIK_EXTERNAL_DOMAIN}`)"
      - "traefik.http.routers.traefik_https.service=api@internal"
      - "traefik.http.routers.traefik_https.tls=true"
      - "traefik.http.routers.traefik_https.tls.certresolver=letsencrypt"
    networks:
      - mayan
      - traefik
    ports:
      - "${MAYAN_RABBITMQ_ADMIN_HTTP_PORT:-15672}:15672"
      - "${MAYAN_TRAEFIK_DASHBOARD_HTTP_PORT:-8080}:8080"
      - "${MAYAN_TRAEFIK_KEYCLOAK_HTTP_PORT:-8081}:8081"
      - "${MAYAN_TRAEFIK_HTTP_PORT:-80}:80"
      - "${MAYAN_TRAEFIK_HTTPS_PORT:-443}:443"
    profiles:
      - traefik
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${MAYAN_TRAEFIK_LETSENCRYPT_VOLUME:-traefik-certificates-letsencrypt}:/traefik-certificates-letsencrypt

x-mayan-container:
  &mayan-container
  env_file: .env
  environment:
    MAYAN_CELERY_BROKER_URL: amqp://${MAYAN_RABBITMQ_USER:-mayan}:${MAYAN_RABBITMQ_PASSWORD:-mayanrabbitpass}@rabbitmq:5672/${MAYAN_RABBITMQ_VHOST:-mayan}
    MAYAN_CELERY_RESULT_BACKEND: redis://:${MAYAN_REDIS_PASSWORD:-mayanredispassword}@redis:6379/1
    MAYAN_DATABASES: "{'default':{'ENGINE':'django.db.backends.postgresql','NAME':'${MAYAN_DATABASE_NAME:-mayan}','PASSWORD':'${MAYAN_DATABASE_PASSWORD:-mayandbpass}','USER':'${MAYAN_DATABASE_USER:-mayan}','HOST':'${MAYAN_DATABASE_HOST:-postgresql}'} }"
    MAYAN_LOCK_MANAGER_BACKEND: mayan.apps.lock_manager.backends.redis_lock.RedisLock
    MAYAN_LOCK_MANAGER_BACKEND_ARGUMENTS: "{'redis_url':'redis://:${MAYAN_REDIS_PASSWORD:-mayanredispassword}@redis:6379/2'}"
  image: ${MAYAN_DOCKER_IMAGE_NAME:-mayanedms/mayanedms}:${MAYAN_DOCKER_IMAGE_TAG:-s4.4}
  networks:
    - mayan
  restart: unless-stopped
  volumes:
    - ${MAYAN_APP_VOLUME:-app}:/var/lib/mayan
    # Optional volumes to access external data like staging or watch folders
    # - /opt/staging_folder:/staging_folder
    # - /opt/watch_folder:/watch_folder

x-mayan-traefik-labels:
  &mayan-traefik-labels
  labels:
    - "traefik.enable=${MAYAN_TRAEFIK_FRONTEND_ENABLE:-false}"
    - "traefik.http.middlewares.mayan_frontend_http_redirect.redirectscheme.scheme=https"
    - "traefik.http.middlewares.mayan_frontend_http_redirect.redirectscheme.permanent=false"
    - "traefik.http.routers.mayan_frontend_http.entrypoints=http"
    - "traefik.http.routers.mayan_frontend_http.middlewares=mayan_frontend_http_redirect"
    - "traefik.http.routers.mayan_frontend_http.rule=Host(`${MAYAN_TRAEFIK_EXTERNAL_DOMAIN}`)"
    - "traefik.http.routers.mayan_frontend_https.entrypoints=https"
    - "traefik.http.routers.mayan_frontend_https.rule=Host(`${MAYAN_TRAEFIK_EXTERNAL_DOMAIN}`)"
    - "traefik.http.routers.mayan_frontend_https.service=mayan_frontend_http"
    - "traefik.http.routers.mayan_frontend_https.tls=true"
    - "traefik.http.routers.mayan_frontend_https.tls.certresolver=letsencrypt"
    - "traefik.http.services.mayan_frontend_http.loadbalancer.server.port=8000"


x-mayan-frontend-ports:
  &mayan-frontend-ports
  # Disable ports if using Traefik. Set to an empty list `[]`.
  ports:
    - "${MAYAN_FRONTEND_HTTP_PORT:-80}:8000"
    # []

networks:
  keycloak:
    driver: bridge
    # Change to true when using Traefik for increased security.
    internal: false
  mayan:
    driver: bridge
    # Change to true when using Traefik for increased security.
    internal: false
  traefik: {}

volumes:
  airflow-db-volume:
  airflow_dags:
  airflow_logs:
  airflow_plugins:
  postgres_data:
  app:
  elasticsearch:
  keycloak-postgres:
  postgres:
  mountindex:
  rabbitmq:
  redis:
  traefik-certificates-letsencrypt:

I don't know what else could be causing this. One thing I notice is that the x-mayan-container block that defines the &mayan-container anchor sits near the bottom of the file, below the services that reference it with *mayan-container. Could the ordering of the definition matter here?
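For what it's worth, the following minimal file (hypothetical names, not my real services) seems to reproduce the same error for me when I run docker-compose config on it, which is what makes me suspect the ordering:

services:
  app:
    <<: *common   # the alias is read here first...
    command: sleep infinity

x-common:
  &common         # ...but the anchor is only defined further down the file
  image: alpine:3.18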
