1

I have 3 vm (virtualbox). All of them setup to use a single VIP with keepalived. (192.168.100.200). I have one proxy on each vm and one test app on each vm. ( I am testing a high availability scenario where loosing one or two nodes, keeps the setup going). I have the keepalived working correctly. It is just that the requests are not loadbalanced, it always going to the same instance.

What is going wrong ?

version: "3.8"
services:

  # HAproxy
  haproxy                   :
    image                   : haproxy:2.3.2
    container_name          : haproxy
    networks                :
                            - app-net
    ports                   :
                            - 80:80
    volumes                 :
                            - /etc/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
    deploy                  :
      mode                  : global
      restart_policy        :
        condition           : on-failure
        delay               : 5s
        max_attempts        : 3
        window              : 120s

  # Nginx test site
  wwwsite                 :
    image                 : nginxdemos/hello
    networks              :
                          - app-net
    ports                 :
                          - 8080:80
    deploy                :
      mode                : global

  networks    :
  app-net     :
    driver    : overlay
    name      : app-net
    attachable: true

haproxy.conf

global
    stats socket /var/run/haproxy.stat mode 660 level admin
    stats timeout 30s
    user root
    group root

resolvers docker
    nameserver dns1 127.0.0.11:53
    resolve_retries 3
    timeout resolve 1s
    timeout retry   1s
    hold other      10s
    hold refused    10s
    hold nx         10s
    hold timeout    10s
    hold valid      10s
    hold obsolete   10s

defaults
    timeout connect 10s
    timeout client 30s
    timeout server 30s
    mode http

frontend  fe_web
    mode http
    bind *:80
    default_backend nodes

backend nodes
    balance roundrobin
    server node1 192.168.100.201:8080 check
    server node2 192.168.100.202:8080 check
    server node3 192.168.100.203:8080 check


listen stats 
    bind *:8081
    mode http
    stats enable
    stats uri /
    stats hide-version
CodeWeed
  • 971
  • 2
  • 21
  • 40
  • 1
    I never used haproxy, but judged by your haproxy.conf, it seems like instead of using the service name on port 80 over the container network, you are using the node's ip with the published ingress port (which by default uses ipvs to balance traffic to the replicas). I would suggest to use the {service name}:80 as backend node instead. – Metin Dec 22 '20 at 19:19
  • @Metin Actually it was load balancing, when I tried with curl. Yes, later I figured it out. I just forgot to delete the post. – CodeWeed Dec 23 '20 at 06:03

0 Answers0