I am trying to create an EKS cluster with a managed node group, and I want to execute a shell script that hardens the worker nodes and sets proxy settings before the cluster bootstrap.

Here is my "main.tf" file:

module "eks" {
  source                                = "./modules/eks"
  cluster_name                          = "xxxxxxxxxxxxxxxxxx"
  cluster_version                       = "1.27"
  vpc_id                                = "xxxxxxxxxxxxxxxxxxxxxx"
  control_plane_subnet_ids              = ["subnet-xxxxxxxxxx", "subnet-xxxxxxxxxxx", "subnet-xxxxxxxxxxxx"]
  subnet_ids                            = ["subnet-xxxxxxxxxxxxx", "subnet-xxxxxxxxxxxxx", "subnet-xxxxxxxxxxxxx", "subnet-xxxxxxxxxxxxxxxx", "subnet-xxxxxxxxxxxxxxxx", "subnet-xxxxxxx"]
  cluster_endpoint_public_access        = false
  create_aws_auth_configmap             = false
  manage_aws_auth_configmap             = false
  aws_auth_users = [
    {
      userarn  = "arn:aws:iam::xxxxxxxxxxx:user/test-user"
      username = "test-user"
      groups   = ["system:masters"]
    },
  ]
  eks_managed_node_groups = {
    test = {
      ami_id                     = "ami-xxxxxxxxxxxxxxx"
      enable_bootstrap_user_data = false
      pre_bootstrap_user_data = templatefile("${path.module}/userdata.tpl", {
        cluster_endpoint    = module.eks.cluster_endpoint
        cluster_certificate = module.eks.cluster_certificate_authority_data
        cluster_name        = module.eks.cluster_name
      })
      instance_types             = ["t2.medium"]
      min_size                   = 1
      max_size                   = 1
      desired_size               = 1
      capacity_type              = "ON_DEMAND"
      labels = {
        app  = "test"
      }
      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            name                  = "disk-DR"
            volume_size           = 100
            volume_type           = "gp3"
            delete_on_termination = true
            tags = {
              "Environment"  = "Testing"
            }
          }
        }
      }
      tags = {
        "Environment"  = "Testing"
      }
    }
  }
  node_security_group_additional_rules = {
    ingress_443 = {
      description                   = "Cluster node internal communication"
      protocol                      = "tcp"
      from_port                     = 443
      to_port                       = 443
      type                          = "ingress"
      self                          = true
    }
  }
}

Here is my "userdata.tpl" file:

#!/bin/bash
yum update -y
chmod 600 /etc/kubernetes/kubelet
ls -ld /etc/kubernetes/kubelet >> /var/log/vaca.log
chmod 600 /etc/kubernetes/kubelet/kubelet-config.json
ls -l /etc/kubernetes/kubelet/kubelet-config.json >> /var/log/vaca.log
sed -i 's/--hostname-override=[^ ]*//g' /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
sed -i '5s/^/ /;3i\"eventRecordQPS":0,' /etc/kubernetes/kubelet/kubelet-config.json
systemctl daemon-reload
systemctl restart kubelet
systemctl is-active kubelet >> /var/log/vaca.log
grep "evenRecordQPS" /etc/kubernetes/kubelet/kubelet-config.json >> /var/log/vaca.log
grep "--hostname-override" /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf >> /var/log/vaca.log
echo "tmpfs /tmp tmpfs defaults,rw,nosuid,nodev,noexec,relatime 0 0" >> /etc/fstab
mount -a
/etc/eks/bootstrap.sh --apiserver-endpoint ${cluster_endpoint} --b64-cluster-ca ${cluster_certificate} ${cluster_name}
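
For what it's worth, the template itself can be checked in isolation in "terraform console" with stand-in values, to confirm the problem is not in the template file (the endpoint, CA data, and cluster name below are made up):

templatefile("${path.module}/userdata.tpl", {
  cluster_endpoint    = "https://EXAMPLE.gr7.eu-west-1.eks.amazonaws.com"
  cluster_certificate = "LS0tLS1FWEFNUExFLS0tLQ=="
  cluster_name        = "test-cluster"
})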

I am getting the following error when I run "terraform plan" and "terraform apply":

Error: Invalid count argument

  on modules/eks/modules/_user_data/main.tf line 67, in data "cloudinit_config" "linux_eks_managed_node_group":
  67: count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0

The "count" value depends on resource attributes that cannot be determined
until apply, so Terraform cannot predict how many instances will be created.
To work around this, use the -target argument to first apply only
the resources that the count depends on.

##[Warning]Can't find loc string for key: TerraformPlanFailed
##[error]: Error: TerraformPlanFailed 1
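
If I follow the error's "-target" suggestion, I believe the two-step apply would look something like the following (the inner resource address is my guess based on the module layout), though I would prefer a configuration that plans in a single pass:

terraform apply -target='module.eks.aws_eks_cluster.this[0]'
terraform apply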

Here is the data block from main.tf (from the "_user_data" module of the EKS Terraform module) for quick reference:

data "cloudinit_config" "linux_eks_managed_node_group" {
  count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0

  base64_encode = true
  gzip          = false
  boundary      = "//"

  # Prepend to existing user data supplied by AWS EKS
  part {
    content_type = "text/x-shellscript"
    content      = var.pre_bootstrap_user_data
  }
}
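
My understanding is that the count above cannot be evaluated at plan time because my pre_bootstrap_user_data string is built from module.eks.* outputs, so the comparison var.pre_bootstrap_user_data != "" is unknown until the cluster exists. The workaround I am considering is to stop calling templatefile() myself and instead pass the template through "user_data_template_path", since the module then renders the template itself and (as far as I can tell from its templates/linux_user_data.tpl) supplies cluster_name, cluster_endpoint, and cluster_auth_base64 on its own; I would also rename my ${cluster_certificate} placeholder to ${cluster_auth_base64}. A sketch, assuming those template variable names are correct for my module version:

  eks_managed_node_groups = {
    test = {
      ami_id                     = "ami-xxxxxxxxxxxxxxx"
      enable_bootstrap_user_data = false
      # Rendered by the module itself, so no reference back to module.eks outputs
      user_data_template_path    = "${path.module}/userdata.tpl"
      # ... remaining node group settings unchanged
    }
  }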
