# Episode 183 – Terraform EKS Cilium
**Watch the episode:** {%youtube EjCjDr-URrQ %}
With [Duffie Cooley](https://bsky.app/profile/mauilion.dev)
## Upcoming events
* [Cisco Live](https://www.ciscolive.com/global.html) in San Diego
* [eBPF/Cilium talks](https://www.ciscolive.com/global/learn/session-catalog.html?search=ebpf#/) in the session catalog
* [Isovalent talks](https://www.linkedin.com/posts/isovalent_ciscolive-activity-7333148300172267522-JSdb?utm_source=share&utm_medium=member_desktop&rcm=ACoAAAAFCLUBX1eHC1JBMnqBMfBBIR-o4DVdxrQ)
---
main.tf
``` hcl
# Create the EKS cluster.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.36"

  # Disable all addons; we will add them later.
  bootstrap_self_managed_addons = false

  cluster_name                   = var.cluster_name
  cluster_version                = var.cluster_version
  cluster_endpoint_public_access = true

  # Add the cluster creator as a cluster admin.
  enable_cluster_creator_admin_permissions = true

  # Working example of also granting cluster-admin to a set of users known in AWS IAM:
  # access_entries = {
  #   allusers = {
  #     kubernetes_groups = []
  #     principal_arn     = "arn:aws:iam::${your aws account}:role/admin"
  #
  #     policy_associations = {
  #       allusers = {
  #         policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
  #         access_scope = {
  #           type = "cluster"
  #         }
  #       }
  #     }
  #   }
  # }

  vpc_id     = var.vpc_id
  subnet_ids = var.private_subnets

  tags = {
    cluster = var.cluster_name
  }
}
# Use a data source to get all Availability Zones in the region.
data "aws_availability_zones" "available_zones" {}
resource "helm_release" "cilium" {
name = "cilium"
description = "A Helm chart to deploy cilium"
namespace = "kube-system"
chart = "cilium"
version = var.cilium_version
repository = var.cilium_helm_repo
wait = false
# pass the cluster_endpoint to the helm values so that we can configure kube-proxy replacement.
set {
name = "k8sServiceHost"
value = trimprefix(module.eks.cluster_endpoint, "https://")
}
values = [file("${path.module}/cilium_values.yaml")]
depends_on = [module.eks.aws_eks_cluster]
}
data "aws_iam_policy_document" "aws_cilium_policy" {
# Adding extra permission according to docs https://docs.cilium.io/en/latest/network/concepts/ipam/eni/#required-privileges
statement {
effect = "Allow"
resources = ["*"]
actions = [
"ec2:CreateTags"
]
}
}
resource "aws_iam_policy" "aws_cilium_policy" {
name = "${var.cluster_name}-add-cilium"
description = "IAM Policy for Cilium"
policy = data.aws_iam_policy_document.aws_cilium_policy.json
tags = {
cluster = var.cluster_name
}
}
module "eks_nodegroup" {
source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group"
version = "~> 20.18"
name = "${var.cluster_name}-ng"
cluster_name = module.eks.cluster_name
cluster_version = var.cluster_version
subnet_ids = var.private_subnets
cluster_service_cidr = "172.20.0.0/16"
ami_id = var.image_id
instance_types = [var.instance_type]
min_size = var.worker_node_count
max_size = var.worker_node_count
desired_size = var.worker_node_count
enable_bootstrap_user_data = true
# Using --show-max-allowed would print the maximum allowed by ENI IPs, not having this flag caps the Max to 110 or 250 (30 or more CPUs)
post_bootstrap_user_data = <<-EOT
KUBELET_CONFIG=/etc/kubernetes/kubelet/kubelet-config.json
MAX_PODS=$(/etc/eks/max-pods-calculator.sh --instance-type-from-imds --cni-version 1.10.0 --cni-prefix-delegation-enabled)
echo "$(jq ".maxPods=$MAX_PODS" $KUBELET_CONFIG)" > $KUBELET_CONFIG
systemctl restart kubelet
EOT
labels = {
cluster = "${var.cluster_name}"
}
taints = {
# taint nodes so that application pods are
# not scheduled/executed until Cilium is deployed.
addons = {
key = "node.cilium.io/agent-not-ready"
value = "true"
effect = "NO_EXECUTE"
},
}
iam_role_additional_policies = {
Cilium_Policy = aws_iam_policy.aws_cilium_policy.arn
}
tags = {
Name = "${var.cluster_name}-ng"
cluster = var.cluster_name
}
depends_on = [helm_release.cilium]
}
# Add CoreDNS and the aws-ebs-csi-driver.
module "eks_blueprints_addons" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.21.0"

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  eks_addons = {
    coredns = {
      most_recent = true
    }
    aws-ebs-csi-driver = {
      most_recent = true
    }
  }

  tags = {
    cluster = var.cluster_name
  }

  depends_on = [module.eks_nodegroup]
}
# Add the other desired features.
module "enable_others" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.21.0"

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  enable_aws_load_balancer_controller = true
  enable_kube_prometheus_stack        = true
  enable_metrics_server               = true

  tags = {
    cluster = var.cluster_name
  }

  depends_on = [module.eks_blueprints_addons]
}
# These resources create a token-based kubeconfig that can be used
# without needing an AWS token.
resource "kubernetes_service_account_v1" "admin" {
  metadata {
    name      = local.sa_name
    namespace = "kube-system"
  }
  automount_service_account_token = true
}

resource "kubernetes_secret_v1" "admin-token" {
  depends_on = [
    kubernetes_service_account_v1.admin
  ]
  metadata {
    name      = "admin-token"
    namespace = "kube-system"
    annotations = {
      "kubernetes.io/service-account.name" = local.sa_name
    }
  }
  type                           = "kubernetes.io/service-account-token"
  wait_for_service_account_token = true
}

resource "kubernetes_cluster_role_binding" "admin" {
  metadata {
    name = "${local.sa_name}-admin"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }
  subject {
    kind      = "ServiceAccount"
    name      = local.sa_name
    namespace = "kube-system"
  }
}

locals {
  sa_name = "${var.cluster_name}-admin"

  template_vars = {
    cluster_name     = var.cluster_name
    cluster_endpoint = module.eks.cluster_endpoint
    cluster_ca       = module.eks.cluster_certificate_authority_data
    token            = kubernetes_secret_v1.admin-token.data.token
  }

  kubeconfig = templatefile("${path.module}/kubeconfig.tpl", local.template_vars)
}
```
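The `cilium_values.yaml` file that `helm_release.cilium` reads is not reproduced in these notes. As a rough sketch only, the usual Cilium Helm values for EKS with ENI IPAM and kube-proxy replacement (per the Cilium docs linked in the IAM policy above; the actual file used in the episode may differ) look like this, expressed here inline via `yamlencode()`:

``` hcl
# Hypothetical stand-in for cilium_values.yaml; not the episode's actual file.
# These are standard Cilium Helm values for ENI IPAM with kube-proxy
# replacement on EKS.
locals {
  cilium_values_sketch = yamlencode({
    # kube-proxy replacement; k8sServiceHost is set separately above.
    kubeProxyReplacement = true
    k8sServicePort       = 443

    # ENI IPAM, which is why the ec2:CreateTags permission is needed.
    ipam = {
      mode = "eni"
    }
    eni = {
      enabled = true
    }

    # ENI mode uses native routing rather than an overlay.
    routingMode                = "native"
    egressMasqueradeInterfaces = "eth+"
  })
}
```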
variables.tf
``` hcl
# Stack name
variable "cluster_name" {
  type = string
}

variable "cilium_version" {
  type    = string
  default = "1.16.9"
}

variable "cilium_helm_repo" {
  type    = string
  default = "https://helm.cilium.io"
}

variable "vpc_id" {
  type = string
}

variable "public_subnets" {
  type = list(any)
}

variable "private_subnets" {
  type = list(any)
}

# Worker node & kubectl instance size
variable "instance_type" {
  type        = string
  description = "Worker node instance type"
}

# Node count
variable "worker_node_count" {
  type        = string
  description = "Number of worker nodes"
}

# AMI ID
variable "image_id" {
  type        = string
  description = "AMI ID"
}

# Cluster version
variable "cluster_version" {
  type        = string
  description = "Cluster version"
}
```
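Only the two Cilium variables have defaults, so everything else must be supplied, e.g. via a `terraform.tfvars`. A hypothetical example; the IDs below are placeholders, not values from the episode:

``` hcl
cluster_name      = "cilium-demo"
cluster_version   = "1.31"
vpc_id            = "vpc-0123456789abcdef0"                  # placeholder
public_subnets    = ["subnet-aaaa1111", "subnet-bbbb2222"]   # placeholder
private_subnets   = ["subnet-cccc3333", "subnet-dddd4444"]   # placeholder
instance_type     = "m5.large"
worker_node_count = "2"
image_id          = "ami-0123456789abcdef0" # an EKS-optimized AMI matching the cluster version
```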
outputs.tf
``` hcl
output "kubeconfig" {
value = local.kubeconfig
}
```
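Since the output is sensitive, retrieve it with `terraform output -raw kubeconfig > kubeconfig`. Alternatively, a `local_file` resource (from the hashicorp/local provider, not part of the episode's code) could write it to disk directly:

``` hcl
# Optional alternative, assuming the hashicorp/local provider: write the
# rendered kubeconfig straight to disk instead of going through an output.
resource "local_file" "kubeconfig" {
  content         = local.kubeconfig
  filename        = "${path.module}/kubeconfig"
  file_permission = "0600" # the file embeds a cluster-admin token
}
```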
providers.tf
``` hcl
provider "helm" {
kubernetes {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", var.cluster_name]
command = "aws"
}
}
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
args = ["eks", "get-token", "--cluster-name", var.cluster_name]
command = "aws"
}
}
```
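The `kubernetes { ... }` block inside the helm provider is the 2.x syntax (the 3.x provider changed it to an attribute), so pinning provider versions is worth doing. A plausible `versions.tf`, not shown in the episode:

``` hcl
# Assumed version pins; adjust to taste. The helm provider is held to 2.x
# because 3.x replaces the kubernetes block with an attribute.
terraform {
  required_version = ">= 1.5"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.0"
    }
  }
}
```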
kubeconfig.tpl
``` yaml
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ${cluster_ca}
    server: ${cluster_endpoint}
  name: ${cluster_name}
contexts:
- context:
    cluster: ${cluster_name}
    user: ${cluster_name}
  name: ${cluster_name}
current-context: ${cluster_name}
kind: Config
preferences: {}
users:
- name: ${cluster_name}
  user:
    token: ${token}
```