In the Google Cloud console, create a new project named gketest and click the "Create" button. After switching to the gketest project, open the navigation menu at the top left, go to Kubernetes Engine, and click the button to enable the Kubernetes Engine API. Then create a working directory for the Terraform configuration:
$ mkdir gcp-terraform; cd gcp-terraform
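As a side note, enabling the Kubernetes Engine API can also be handled from Terraform itself rather than the console, for example with a google_project_service resource. A minimal sketch, assuming the project ID gketest-462015 used later in this post (this resource is not part of the files below):
resource "google_project_service" "container" {
  # Hypothetical resource: enables the Kubernetes Engine API for the project created above.
  project = "gketest-462015"
  service = "container.googleapis.com"
}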
$ nano gke.tf
resource "google_container_cluster" "gke_cluster" {
name = "k1"
location = "asia-east1-b"
initial_node_count = 1
network = google_compute_network.vpc.name
subnetwork = google_compute_subnetwork.subnet.name
networking_mode = "VPC_NATIVE"
logging_service = "none"
monitoring_service = "none"
private_cluster_config {
enable_private_endpoint = false
enable_private_nodes = true
master_ipv4_cidr_block = "10.13.0.0/28"
}
ip_allocation_policy {
cluster_ipv4_cidr_block = "10.98.0.0/16"
services_ipv4_cidr_block = "10.244.0.0/16"
}
node_config {
machine_type = "e2-custom-4-12288"
}
}
output "cluster_endpoint" {
value = google_container_cluster.gke_cluster.endpoint
}
output "cluster_ca_certificate" {
value = google_container_cluster.gke_cluster.master_auth.0.cluster_ca_certificate
sensitive = true
}
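Note that gke.tf hardcodes the zone and machine type even though variable.tf (next) defines GKE_LOCATION and GKE_MACHINE_TYPE. If you would rather have the cluster follow those variables, the relevant arguments could be written as in this sketch (keep in mind the variable default e2-custom-2-8192 differs from the hardcoded e2-custom-4-12288 above):
  # Inside resource "google_container_cluster" "gke_cluster":
  location = var.GKE_LOCATION # "asia-east1-b" by default

  node_config {
    machine_type = var.GKE_MACHINE_TYPE # "e2-custom-2-8192" by default
  }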
$ nano variable.tf
#PROJECT INFO
variable "PROJECT_ID" {
  default = "gketest-462015"
}
variable "REGION" {
  default = "asia-east1"
}
#VPC
variable "VPC_NAME" {
  default = "k8s-vpc"
}
variable "VPC_NAME2" {
  default = "k8s-vpc2"
}
variable "VPC_SUBNET_NAME" {
  default = "asia-east1"
}
variable "VPC_SUBNET_NAME2" {
  default = "asia-east1-2"
}
variable "IP_CIDR_RANGE" {
  default = "10.10.0.0/24"
}
variable "IP_CIDR_RANGE2" {
  default = "10.10.1.0/24"
}
variable "ROUTER_NAME" {
  default = "demo-route"
}
variable "ROUTER_NAME2" {
  default = "demo-route2"
}
variable "NAT_NAME" {
  default = "demo-nat"
}
variable "NAT_NAME2" {
  default = "demo-nat2"
}
#GKE
variable "GKE_LOCATION" {
  default = "asia-east1-b"
}
variable "GKE_MACHINE_TYPE" {
  default = "e2-custom-2-8192"
}
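The defaults above can be overridden without editing variable.tf, for example with a terraform.tfvars file. A sketch with hypothetical values (any variable not listed keeps its default):
# terraform.tfvars (hypothetical overrides)
PROJECT_ID       = "my-gke-lab-123456"
REGION           = "asia-east1"
GKE_MACHINE_TYPE = "e2-standard-4"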
$ nano vpc.tf
resource "google_compute_network" "vpc" {
name = var.VPC_NAME
auto_create_subnetworks = "false"
}
resource "google_compute_subnetwork" "subnet" {
name = "${var.VPC_SUBNET_NAME}-subnet"
region = var.REGION
network = google_compute_network.vpc.name
ip_cidr_range = var.IP_CIDR_RANGE
private_ip_google_access = true
}
#resource "google_compute_subnetwork" "subnet2" {
# name = "${var.VPC_SUBNET_NAME}2-subnet"
# region = var.REGION
# network = google_compute_network.vpc.name
# ip_cidr_range = var.IP_CIDR_RANGE2
# private_ip_google_access = true
#}
resource "google_compute_router" "router" {
name = var.ROUTER_NAME
region = google_compute_subnetwork.subnet.region
network = google_compute_network.vpc.id
}
#resource "google_compute_address" "address" {
# count = 2
# name = "nat-manual-ip-${count.index}"
# region = google_compute_subnetwork.subnet.region
#}
resource "google_compute_address" "r1-address" {
name = "nat-address"
region = google_compute_subnetwork.subnet.region
}
#resource "google_compute_address" "r1-address-2" {
# name = "nat-address2"
# region = google_compute_subnetwork.subnet.region
#}
resource "google_compute_router_nat" "nat_manual" {
name = var.NAT_NAME
router = google_compute_router.router.name
region = google_compute_router.router.region
nat_ip_allocate_option = "MANUAL_ONLY"
nat_ips = [google_compute_address.r1-address.self_link]
enable_dynamic_port_allocation = true
enable_endpoint_independent_mapping = false
min_ports_per_vm = 4096
max_ports_per_vm = 65536
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
subnetwork {
name = google_compute_subnetwork.subnet.id
source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
}
}
#resource "google_compute_router" "router2" {
# name = var.ROUTER_NAME2
# region = google_compute_subnetwork.subnet2.region
# network = google_compute_network.vpc.id
#}
#resource "google_compute_address" "r2-address" {
# name = "r2-nat-address"
# region = google_compute_subnetwork.subnet2.region
#}
#resource "google_compute_router_nat" "nat_manual2" {
# name = var.NAT_NAME2
# router = google_compute_router.router2.name
# region = google_compute_router.router2.region
# nat_ip_allocate_option = "MANUAL_ONLY"
# nat_ips = google_compute_address.r2-address.*.self_link
# enable_dynamic_port_allocation = true
# enable_endpoint_independent_mapping = false
# min_ports_per_vm = 4096
# max_ports_per_vm = 65536
# source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
# subnetwork {
# name = google_compute_subnetwork.subnet2.id
# source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
# }
#}
#Firewall Rule
#resource "google_compute_firewall" "datalake-prod-composer-internal-ingress" {
# name = "datalake-prod-composer-internal-ingress"
# network = google_compute_network.prod-vpc.name
# allow {
# protocol = "tcp"
# }
# allow {
# protocol = "udp"
# }
# priority = 1000
# source_ranges = var.datalake_prod_composer_ingress_allow_source_ip
# target_tags = var.datalake_prod_composer_ingress_target_tag
#}
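The commented-out firewall rule above references a google_compute_network.prod-vpc resource and two variables (datalake_prod_composer_ingress_allow_source_ip, datalake_prod_composer_ingress_target_tag) that are not defined in this configuration, so it cannot be enabled as-is. A sketch adapted to the vpc network defined in this file, with the subnet CIDR as an assumed source range and a hypothetical target tag:
resource "google_compute_firewall" "internal-ingress" {
  # Sketch only: allow TCP/UDP ingress from the subnet defined above.
  name    = "internal-ingress"
  network = google_compute_network.vpc.name

  allow {
    protocol = "tcp"
  }
  allow {
    protocol = "udp"
  }

  priority      = 1000
  source_ranges = [var.IP_CIDR_RANGE] # 10.10.0.0/24 by default
  target_tags   = ["gke-node"]        # hypothetical network tag
}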
$ nano provider.tf
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.40.0"
    }
  }
  required_version = ">= 0.14"
}
provider "google" {
  project = var.PROJECT_ID
  region  = var.REGION
}
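This provider block does not set credentials, so Terraform falls back to application default credentials (for example from gcloud auth application-default login, or the Cloud Shell identity). If you authenticate with a service-account key file instead, the provider block could be written as in this sketch (the key path is hypothetical):
provider "google" {
  project     = var.PROJECT_ID
  region      = var.REGION
  credentials = file("~/keys/terraform-sa.json") # hypothetical service-account key
}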
$ terraform init
$ terraform plan
$ terraform apply
$ gcloud container clusters get-credentials k1 --zone asia-east1-b --project $(gcloud config get-value project 2> /dev/null)
$ kubectl get nodes
NAME                                STATUS   ROLES    AGE     VERSION
gke-k1-default-pool-e0781567-7ljm   Ready    <none>   5m39s   v1.32.4-gke.1106006
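Besides fetching a kubeconfig with gcloud, the cluster_endpoint and cluster_ca_certificate outputs defined in gke.tf can also feed a Terraform kubernetes provider directly. A sketch of that wiring (not part of the original files; it assumes the hashicorp/kubernetes provider is added to required_providers):
data "google_client_config" "default" {}

provider "kubernetes" {
  # Connect to the cluster created above using its endpoint, the caller's
  # OAuth token, and the cluster CA certificate.
  host  = "https://${google_container_cluster.gke_cluster.endpoint}"
  token = data.google_client_config.default.access_token
  cluster_ca_certificate = base64decode(
    google_container_cluster.gke_cluster.master_auth.0.cluster_ca_certificate
  )
}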
When you are done testing, remember to clean everything up; otherwise the objects left behind on Google Cloud will keep generating charges.
# Remove all of the objects that were created
$ terraform destroy
# Unlink the billing account from the project
$ gcloud billing projects unlink $GOOGLE_CLOUD_PROJECT
billingAccountName: ''
billingEnabled: false
name: projects/gketest-462015/billingInfo
projectId: gketest-462015
# Delete the project
$ gcloud projects delete -q $GOOGLE_CLOUD_PROJECT
Deleted [https://cloudresourcemanager.googleapis.com/v1/projects/gketest-462015].
You can undo this operation for a limited period by running the command below.
$ gcloud projects undelete gketest-462015
See https://cloud.google.com/resource-manager/docs/creating-managing-projects for information on shutting down projects.
# Confirm the project has been deleted
$ gcloud projects describe $GOOGLE_CLOUD_PROJECT
createTime: '2025-06-05T15:39:13.542189Z'
lifecycleState: DELETE_REQUESTED
name: gketest
projectId: gketest-462015
projectNumber: '876476350521'