0

I have one external network load balancer (listening on port 80) which forwards traffic to a ServiceA instance (on port 9000). I'd like to configure an internal network load balancer that will get requests from ServiceA instances and forward them to a ServiceB instance. However, I have a problem with configuring an internal NLB in Terraform. Here's what I have at the moment:

# Security group intended for the internal NLB.
# NOTE(review): nothing in the aws_lb resource below attaches this group to the
# NLB (classic Network Load Balancers did not take security groups), so these
# rules do not filter the NLB's traffic as written — verify how this group is
# actually used.
# NOTE(review): this group declares in-line ingress/egress rules while the
# standalone aws_security_group_rule "serviceB_nlb_to_serviceB" further down
# also targets it; Terraform's AWS provider documents that mixing in-line rules
# with aws_security_group_rule on the same group produces conflicting plans.
resource "aws_security_group" "allow-all-traffic-for-internal-nlb" {
  name = "int-nlb"
  description = "Allow inbound and outbound traffic for internal NLB"
  vpc_id = "${aws_vpc.default.id}"
  # In-line ingress: TCP 81 from 10.61.110.0/24 (presumably the subnet the
  # ServiceA instances live in — TODO confirm against aws_subnet.sub).
  ingress {
    from_port = 81
    protocol = "tcp"
    to_port = 81
    cidr_blocks = ["10.61.110.0/24"]
  }
  # In-line egress: all protocols, all ports, anywhere.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Internal network load balancer that fronts the ServiceB instance(s),
# deployed into the single subnet referenced as aws_subnet.sub.
resource "aws_lb" "serviceB_lb" {
  name               = "serviceB-internal-lb"
  load_balancer_type = "network"
  internal           = true
  subnets            = ["${aws_subnet.sub.id}"]
}

# TCP listener on port 81 of the internal NLB; every connection is forwarded
# to the ServiceB target group.
resource "aws_lb_listener" "serviceB-internal-lb-listener" {
  load_balancer_arn = "${aws_lb.serviceB_lb.arn}"
  protocol          = "TCP"
  port              = 81

  default_action {
    type             = "forward"
    target_group_arn = "${aws_lb_target_group.serviceB-internal-lb-tg.arn}"
  }
}

# Target group for the internal NLB, with an HTTP health check.
# NOTE(review): the health check probes HTTP GET / on port 8181. If ServiceB is
# not an HTTP service returning a success code on that port, every target is
# marked unhealthy — a likely cause of the failure described in the question.
# Confirm what ServiceB serves; for a plain TCP service use protocol = "TCP"
# in the health_check block instead of HTTP/path.
# NOTE(review): port = 81 is only the default port for registered targets; the
# aws_lb_target_group_attachment below registers the instance with port = 8181,
# which overrides this value, so real traffic is delivered to 8181.
resource "aws_lb_target_group" "serviceB-internal-lb-tg" {
  name     = "serviceB-int-lb-tg"
  port = 81
  protocol = "TCP"
  vpc_id = "${aws_vpc.default.id}"
  target_type = "instance"

  health_check {
    protocol = "HTTP"
    port = "8181"
    path = "/"
  }
}

# Register the ServiceB instance in the target group. The port specified here
# (8181) overrides the target group's default port (81) for this target, so
# the NLB delivers connections to the instance on 8181.
resource "aws_lb_target_group_attachment" "attach-serviceB-tg-to-internal-nlb" {
  target_group_arn = "${aws_lb_target_group.serviceB-internal-lb-tg.arn}"
  target_id        = "${aws_instance.serviceB-1a.id}"
  port             = 8181
}

# Allow NLB-delivered traffic to reach the ServiceB instance.
# FIX: the instance receives traffic on port 8181 (the attachment port, which
# overrides the target group's port 81) — the listener's port 81 terminates at
# the NLB itself, so opening 81 here never matched the real traffic.
# NOTE(review): an NLB does not source connections from a security group — with
# instance targets it preserves the original client IP — so matching on the
# NLB's security group likely never applies; consider cidr_blocks covering the
# NLB/client subnets instead. TODO confirm against the actual traffic source.
resource "aws_security_group_rule" "serviceB_from_serviceB-lb" {
  type                     = "ingress"
  from_port                = 8181
  to_port                  = 8181
  protocol                 = "tcp"
  source_security_group_id = "${aws_security_group.allow-all-traffic-for-internal-nlb.id}"
  security_group_id        = "${aws_security_group.serviceB-sg.id}"
}

# NOTE(review): this standalone egress rule targets the same security group
# ("allow-all-traffic-for-internal-nlb") that already declares in-line rules
# above. Terraform's AWS provider documentation warns against mixing in-line
# rules with aws_security_group_rule on one group — the two fight over the
# group's rule set on every plan. It is also redundant here: the group's
# in-line egress already allows all outbound traffic. If it is kept, the port
# should match the real target port (8181), not the listener port 81.
resource "aws_security_group_rule" "serviceB_nlb_to_serviceB" {
  type                     = "egress"
  from_port                = 81
  to_port                  = 81
  protocol                 = "tcp"
  source_security_group_id = "${aws_security_group.serviceB-sg.id}"
  security_group_id        = "${aws_security_group.allow-all-traffic-for-internal-nlb.id}"
}
####
# Security group attached to the ServiceB instance: accepts TCP 8181 from the
# 10.61.110.0/24 range. No egress block is declared, so Terraform strips the
# default allow-all egress rule; replies to inbound connections still flow
# because security groups are stateful, but the instance cannot initiate
# outbound connections.
resource "aws_security_group" "serviceB-sg" {
  name        = "${var.environment}-serviceB-sg"
  description = "${var.environment} serviceB security group"
  vpc_id      = "${aws_vpc.default.id}"

  ingress {
    protocol    = "tcp"
    from_port   = 8181
    to_port     = 8181
    cidr_blocks = ["10.61.110.0/24"]
  }
}

The internal load balancer is listening on port 81, and the ServiceB instance is running on port 8181. Both external and internal NLBs and two services are located in one subnet. When I check the health status for the target group of the internal load balancer, I get a health check failure. What can cause this to happen?

0 Answers