
Deploying GKE Clusters with Terraform from scratch

1. Prerequisites

  1. Create a Google (Gmail) account
  2. Sign up for the Google Cloud Platform free trial (note: a VISA or other credit card is required)
    https://console.cloud.google.com/getting-started
  3. Create a project in Google Cloud Platform
    • Click the "My First Project" button at the top left
    • Click the "New Project" button at the top right
    • Enter gketest and click the "Create" button
  4. Confirm you are in the newly created gketest project, then click the button at the top left
  5. Click the Kubernetes Engine button
  6. Click the "Enable" button to enable the Kubernetes Engine API (steps 3 and 6 can also be done with gcloud; see the sketch after this list)
  7. Click the "Cloud Shell" button at the top right and authorize Cloud Shell
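
For reference, here is a minimal gcloud sketch of steps 3 and 6. It assumes the project ID gketest-462015 (project IDs are globally unique, so yours will differ) and uses a placeholder billing account ID:

$ gcloud projects create gketest-462015 --name=gketest
$ gcloud billing projects link gketest-462015 --billing-account=XXXXXX-XXXXXX-XXXXXX
$ gcloud config set project gketest-462015
$ gcloud services enable container.googleapis.com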

2. Deployment

$ mkdir gcp-terraform; cd gcp-terraform

$ nano gke.tf
resource "google_container_cluster" "gke_cluster" {
  name               = "k1"
  location           = "asia-east1-b"
  initial_node_count = 1

  network    = google_compute_network.vpc.name
  subnetwork = google_compute_subnetwork.subnet.name  
  networking_mode = "VPC_NATIVE"

  logging_service = "none"
  monitoring_service = "none"

  private_cluster_config {
    enable_private_endpoint = false
    enable_private_nodes    = true
    master_ipv4_cidr_block  = "10.13.0.0/28"
  }
  ip_allocation_policy {
    cluster_ipv4_cidr_block  = "10.98.0.0/16"
    services_ipv4_cidr_block = "10.244.0.0/16"
  }
  node_config {
    machine_type = "e2-custom-4-12288" 
  }
}

output "cluster_endpoint" {
  value = google_container_cluster.gke_cluster.endpoint
}

output "cluster_ca_certificate" {
  value     = google_container_cluster.gke_cluster.master_auth.0.cluster_ca_certificate
  sensitive = true
}
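
Two things are worth noting in gke.tf: enable_private_nodes = true gives the worker nodes internal IPs only, which is why vpc.tf below adds a Cloud Router and Cloud NAT for their outbound traffic, while enable_private_endpoint = false keeps the control-plane endpoint publicly reachable so Cloud Shell can talk to it. After terraform apply, the two outputs can be read back; a small sketch (terraform output -raw needs Terraform 0.15 or newer):

$ terraform output cluster_endpoint
$ terraform output -raw cluster_ca_certificate | base64 -d | head -n 1   # should print -----BEGIN CERTIFICATE-----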

$ nano variable.tf
#PROJECT INFO
variable "PROJECT_ID" {
  default = "gketest-462015"
}

variable "REGION" {
  default = "asia-east1"
}

#VPC
variable "VPC_NAME" {
  default = "k8s-vpc"
}

variable "VPC_NAME2" {
  default = "k8s-vpc2"
}

variable "VPC_SUBNET_NAME" {
  default = "asia-east1"
}

variable "VPC_SUBNET_NAME2" {
  default = "asia-east1-2"
}

variable "IP_CIDR_RANGE" {
  default = "10.10.0.0/24"
}

variable "IP_CIDR_RANGE2" {
  default = "10.10.1.0/24"
}

variable "ROUTER_NAME" {
  default = "demo-route"
}

variable "ROUTER_NAME2" {
  default = "demo-route2"
}

variable "NAT_NAME" {
  default = "demo-nat"
}

variable "NAT_NAME2" {
  default = "demo-nat2"
}

#GKE

variable "GKE_LOCATION" {
  default = "asia-east1-b"
}

variable "GKE_MACHINE_TYPE" {
  default = "e2-custom-2-8192"
}
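
None of these defaults need to be edited in place: any of them can be overridden at plan/apply time with -var, or with a TF_VAR_-prefixed environment variable. For example (e2-standard-2 is just an illustrative machine type):

$ terraform plan -var='GKE_MACHINE_TYPE=e2-standard-2'
$ TF_VAR_GKE_LOCATION=asia-east1-a terraform plan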

$ nano vpc.tf
resource "google_compute_network" "vpc" {
  name                    = var.VPC_NAME
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "subnet" {
  name                     = "${var.VPC_SUBNET_NAME}-subnet"
  region                   = var.REGION
  network                  = google_compute_network.vpc.name
  ip_cidr_range            = var.IP_CIDR_RANGE
  private_ip_google_access = true
}

#resource "google_compute_subnetwork" "subnet2" {
#  name          = "${var.VPC_SUBNET_NAME}2-subnet"
#  region        = var.REGION
#  network       = google_compute_network.vpc.name
#  ip_cidr_range = var.IP_CIDR_RANGE2
#  private_ip_google_access   = true
#}


resource "google_compute_router" "router" {
  name    = var.ROUTER_NAME
  region  = google_compute_subnetwork.subnet.region
  network = google_compute_network.vpc.id
}

#resource "google_compute_address" "address" {
#  count  = 2
#  name   = "nat-manual-ip-${count.index}"
#  region = google_compute_subnetwork.subnet.region
#}


resource "google_compute_address" "r1-address" {
  name   = "nat-address"
  region = google_compute_subnetwork.subnet.region
}

#resource "google_compute_address" "r1-address-2" {
#  name   = "nat-address2"
#  region = google_compute_subnetwork.subnet.region
#}


resource "google_compute_router_nat" "nat_manual" {
  name   = var.NAT_NAME
  router = google_compute_router.router.name
  region = google_compute_router.router.region

  nat_ip_allocate_option = "MANUAL_ONLY"
  nat_ips                = [google_compute_address.r1-address.self_link]
  enable_dynamic_port_allocation      = true
  enable_endpoint_independent_mapping = false
  min_ports_per_vm                    = 4096
  max_ports_per_vm                    = 65536     
  source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
  subnetwork {
    name                    = google_compute_subnetwork.subnet.id
    source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
  }
}


#resource "google_compute_router" "router2" {
#  name    = var.ROUTER_NAME2
#  region  = google_compute_subnetwork.subnet2.region
#  network = google_compute_network.vpc.id
#}

#resource "google_compute_address" "r2-address" {
#  name   = "r2-nat-address"
#  region = google_compute_subnetwork.subnet2.region
#}

#resource "google_compute_router_nat" "nat_manual2" {
#  name   = var.NAT_NAME2
#  router = google_compute_router.router2.name
#  region = google_compute_router.router2.region

#  nat_ip_allocate_option = "MANUAL_ONLY"
#  nat_ips                = google_compute_address.r2-address.*.self_link
#  enable_dynamic_port_allocation      = true
#  enable_endpoint_independent_mapping = false
#  min_ports_per_vm                    = 4096
#  max_ports_per_vm                    = 65536 
#  source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
#  subnetwork {
#    name                    = google_compute_subnetwork.subnet2.id
#    source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
#  }
#}

#Firewall Rule
#resource "google_compute_firewall" "datalake-prod-composer-internal-ingress" {
#  name    = "datalake-prod-composer-internal-ingress"
#  network = google_compute_network.prod-vpc.name

#  allow {
#    protocol = "tcp"
#  }
#  allow {
#    protocol = "udp"
#  }
#  priority = 1000
#  source_ranges = var.datalake_prod_composer_ingress_allow_source_ip
#  target_tags = var.datalake_prod_composer_ingress_target_tag
#}
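
The Cloud Router plus Cloud NAT pair is what gives the private GKE nodes outbound internet access, e.g. for pulling container images, without assigning them external IPs. After terraform apply (next step), the NAT and its reserved address can be inspected; a sketch using the names from variable.tf:

$ gcloud compute routers nats describe demo-nat --router=demo-route --region=asia-east1
$ gcloud compute addresses describe nat-address --region=asia-east1 --format='value(address)'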

$ nano provider.tf
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.40.0"
    }
  }

  required_version = ">= 0.14"
}

provider "google" {
  project = var.PROJECT_ID
  region  = var.REGION
}
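
The provider block carries no credentials on purpose: inside Cloud Shell, your identity is already available as Application Default Credentials. If you run this from a machine outside Cloud Shell, authenticate first:

$ gcloud auth application-default login
$ gcloud config set project gketest-462015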

$ terraform init

$ terraform plan

$ terraform apply

$ gcloud container clusters get-credentials k1 --zone asia-east1-b --project $(gcloud config get-value project 2> /dev/null)
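
get-credentials writes a context entry into ~/.kube/config so kubectl can reach the new cluster; a quick sanity check (GKE context names follow the pattern gke_<project>_<zone>_<cluster>):

$ kubectl config current-context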

$ kubectl get nodes
NAME                                STATUS   ROLES    AGE     VERSION
gke-k1-default-pool-e0781567-7ljm   Ready    <none>   5m39s   v1.32.4-gke.1106006
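
The node reports Ready even though it has no external IP. To confirm that its egress really leaves through the Cloud NAT address reserved in vpc.tf, one option is to compare a pod's apparent public IP against the reserved address (ifconfig.me is just one of many echo-your-IP services):

$ kubectl run nat-test --rm -it --image=busybox --restart=Never -- wget -qO- http://ifconfig.me
$ gcloud compute addresses describe nat-address --region=asia-east1 --format='value(address)'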

3. Cleaning Up (those so poor that money is all they have left may skip this step)

If you skip this, the objects left behind on Google Cloud will keep generating charges.

# Destroy everything that was created
$ terraform destroy

# Unlink the billing account from the project
$ gcloud billing projects unlink $GOOGLE_CLOUD_PROJECT
billingAccountName: ''
billingEnabled: false
name: projects/gketest-462015/billingInfo
projectId: gketest-462015

# Delete the project
$ gcloud projects delete -q $GOOGLE_CLOUD_PROJECT
Deleted [https://cloudresourcemanager.googleapis.com/v1/projects/gketest-462015].

You can undo this operation for a limited period by running the command below.
    $ gcloud projects undelete gketest-462015

See https://cloud.google.com/resource-manager/docs/creating-managing-projects for information on shutting down projects.

# Confirm the project has been deleted
$ gcloud projects describe $GOOGLE_CLOUD_PROJECT
createTime: '2025-06-05T15:39:13.542189Z'
lifecycleState: DELETE_REQUESTED
name: gketest
projectId: gketest-462015
projectNumber: '876476350521'