# 1. Download the Alpine "nocloud" cloud-init disk image
$ wget https://dl-cdn.alpinelinux.org/alpine/v3.19/releases/cloud/nocloud_alpine-3.19.1-x86_64-bios-cloudinit-r0.qcow2
# 2. Install the required packages (libguestfs-tools provides virt-customize)
$ apt update; apt install -y libguestfs-tools
# 3. Pre-install qemu-guest-agent, bash and sudo inside the Alpine cloud-init image
$ virt-customize --install qemu-guest-agent,bash,sudo -a nocloud_alpine-3.19.1-x86_64-bios-cloudinit-r0.qcow2
# 4. Create the Alpine VM (no disk attached yet)
$ vmid="9000"
$ qm create "$vmid" --name alp-cloudinit-v3.19.1 --memory 4096 --sockets 1 --cores 4 --net0 virtio,bridge=vmbr0
# 5. Import the Alpine cloud-init disk image into local-lvm storage
$ qm importdisk "$vmid" nocloud_alpine-3.19.1-x86_64-bios-cloudinit-r0.qcow2 local-lvm
# 6. Attach the imported virtual disk through the SCSI interface
$ qm set "$vmid" --scsihw virtio-scsi-pci --scsi0 local-lvm:vm-"$vmid"-disk-0
# 7. Grow the disk
$ qm resize "$vmid" scsi0 50G
# 8. Add the Cloud-Init drive
$ qm set "$vmid" --ide2 local-lvm:cloudinit
# 9. Set boot device
$ qm set "$vmid" --boot c --bootdisk scsi0
# 10. Enable serial device
$ qm set "$vmid" --serial0 socket --vga serial0
# 11. Write the cloud-init user-data snippet
$ mkdir -p /var/lib/vz/snippets/
$ nano /var/lib/vz/snippets/user"$vmid".yml
#cloud-config
# Cloud-init user-data for the Alpine template.
# NOTE: YAML indentation restored — the flat original was invalid: the keys
# under each list item parsed as top-level keys, and the block scalar under
# "content: |" cannot sit at column 0.
hostname: alpine
package_update: true
packages:
  - nano
write_files:
  - path: /etc/resolv.conf
    permissions: "0644"
    owner: root
    content: |
      nameserver 8.8.8.8
users:
  # - default
  - name: bigred
    # groups: bigred
    no_user_group: true
    lock_passwd: false
    sudo: ALL=(ALL) NOPASSWD:ALL
    plain_text_passwd: 'bigred'
    homedir: /home/bigred
    shell: /bin/bash
    # ssh_authorized_keys:
    #   -
ssh_pwauth: True
#bootcmd:
#  - echo -e "bigred\nbigred" | adduser -s /bin/bash bigred
#  - echo -e "rancher\nrancher" | adduser -s /bin/bash rancher
#  - echo -e "bigred\nbigred" | passwd root
# 12. Write the cloud-init network-data snippet
$ nano /var/lib/vz/snippets/network"$vmid".yml
version: 1
config:
  - type: physical
    name: eth0
    subnets:
      - type: static
        address: '192.168.61.21'
        netmask: '255.255.255.0'
        gateway: '192.168.61.2'
# 13. Point the VM at the custom cloud-init snippets
# (one pair of double quotes with ${vmid} inside — the original's
#  quote-splicing left $vmid outside the quotes; step renumbered to fix
#  the duplicated "12")
$ qm set "$vmid" --cicustom "user=local:snippets/user${vmid}.yml,network=local:snippets/network${vmid}.yml"
# 14. Smoke-test: boot the VM and log in over SSH
$ qm start "$vmid"
$ ssh bigred@192.168.61.21
Welcome to Alpine!
...
$ exit
$ qm stop "$vmid"
# 15. Convert the VM into a template
$ qm template "$vmid"
# 1. Create a dedicated Proxmox user for Terraform: terraform@pve
#    (the original comment said "tfuser" — the command actually creates terraform@pve)
$ pveum user add terraform@pve
# 2. Create a role named "Terraform" with the VM-management and datastore
#    privileges Terraform needs to create, configure, clone, monitor and
#    manage VMs and to allocate datastore space
$ pveum role add Terraform -privs "Datastore.AllocateSpace Datastore.Audit Pool.Allocate Sys.Audit Sys.Console Sys.Modify VM.Allocate VM.Audit VM.Clone VM.Config.CDROM VM.Config.Cloudinit VM.Config.CPU VM.Config.Disk VM.Config.HWType VM.Config.Memory VM.Config.Network VM.Config.Options VM.Migrate VM.Monitor VM.PowerMgmt SDN.Use"
# 3. Grant the "Terraform" role to terraform@pve on the root path
#    (the original comment said "TerraformProv"/"tfuser" — the command uses
#     the Terraform role and the terraform@pve user)
$ pveum aclmod / -user terraform@pve -role Terraform
# 4. Generate the API token that Terraform will authenticate with
$ pveum user token add terraform@pve provider --privsep=0
┌──────────────┬──────────────────────────────────────┐
│ key │ value │
╞══════════════╪══════════════════════════════════════╡
│ full-tokenid │ terraform@pve!provider │
├──────────────┼──────────────────────────────────────┤
│ info │ {"privsep":"0"} │
├──────────────┼──────────────────────────────────────┤
│ value │ 1fb0b04e-5673-4d5c-b580-ae602c718d89 │
└──────────────┴──────────────────────────────────────┘
要儲存 Token Secret,因為之後無法取得它。
# Install Terraform from the official HashiCorp APT repository:
# add the signing key, register the repo for this release, then install.
$ apt-get update && apt-get install -y gnupg software-properties-common
$ wget -O- https://apt.releases.hashicorp.com/gpg | \
gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
$ echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \
https://apt.releases.hashicorp.com $(lsb_release -cs) main" | \
tee /etc/apt/sources.list.d/hashicorp.list
$ apt update && apt install -y terraform
# 1. Create a working directory for the Terraform configuration
$ mkdir terraform && cd terraform
# 2. Generate the backend config
#    ('EOF' is quoted so the shell writes the here-doc verbatim and can
#     never expand $... or $( ... ) sequences inside the config)
$ cat <<'EOF' > backend.tf
terraform {
  required_providers {
    proxmox = {
      source  = "telmate/proxmox"
      version = "3.0.1-rc2"
    }
  }
  backend "local" {
  }
}
EOF
# 3. Generate the provider config
#    ('EOF' is quoted so the shell writes the here-doc verbatim)
$ cat <<'EOF' > provider.tf
provider "proxmox" {
  pm_api_url = "https://192.168.61.11:8006/api2/json"
  # api token id is in the form of: <username>@pam!<tokenId>
  pm_api_token_id = "terraform@pve!provider"
  # this is the full secret wrapped in quotes.
  pm_api_token_secret = var.PROXMOX_API_SECRET
  pm_tls_insecure     = true
  # debug log
  # pm_log_enable = true
  # pm_log_file   = "terraform-plugin-proxmox.log"
  # pm_debug      = true
  # pm_log_levels = {
  #   _default    = "debug"
  #   _capturelog = ""
  # }
}
EOF
# 4. Generate the variables file
#    ('EOF' is quoted so the shell writes the here-doc verbatim)
$ cat <<'EOF' > vars.tf
# Token secret is intentionally not given a default: supply it via
# TF_VAR_PROXMOX_API_SECRET so it never lands in version control.
variable "PROXMOX_API_SECRET" {
  type = string
}
variable "ssh_key" {
  default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCa5vPXuVVZOQJw+mvQizMqOY+sJryVg/yPQHHq1NfLVZREbjUHvIMJcTVzEVpy6oO/v6pQ4wYbgUQqPmCP/Np/VvX6YBhdnhc99V8Jn9H99QhmXHVfrG3GgBQ5scupP5VUaPZ372gVsidu63uNyDvnSwZ0cAXUxvB3hANQBH0eJ13/zVKBkWVGQTd/eQJBQx5sxpwXW4sBcPaYdpt+Njtc31GqxwgSzTO9HAHp1aSDq4SA3VEjUNg7/ysW/MwuuslZBsz0wmi55nBa9dkHG1ao72c9viVWc+fz+nLjMRq6QDUxJd2TjQH4k7QexibDXCZydxac2tMRKRo3ukrdSs1TzjcnUdWtTnFM+wDEkWnObagkp9feRg5+7RWh+3qc2zq1/OCEMqGd6Gu17VUxWK4L3QEq+k13vgphuzQpziRiolh8ETvSM0EXajuHj5RoJQKAEhfURvFgH+qa5nUw8DTjtR2gfkevKXOJBTDT9GauTItZJi03fQhVAng1blP1d8OkD+yItHthiylBYW+UdMT/HCgc9JtkDzjtmfRH2HvDZnFJ1O46EX74wycEs7Lgyeu3vHIYC+AP7/XprT9jIT5UHo3MOJ4vZwP0OcFhqg0x+8ptjzWRtn1SubcNRsYlJlc59gqAPSyrsAtYL5FGohgGokxlRX8hDjUhcLmknFQhiw== root@pve"
}
variable "proxmox_host" {
  default = "pve"
}
variable "template_name" {
  default = "alp-cloudinit-v3.19.1"
}
EOF
# 5. Write the Terraform VM definition main.tf
#    (nano is used on purpose: the file contains ${...} HCL interpolation
#     that an unquoted shell here-doc would mangle)
$ nano main.tf
resource "proxmox_vm_qemu" "ckad" {
  count       = 1
  name        = "alp-${count.index + 1}"
  target_node = var.proxmox_host
  vmid        = "101${count.index + 1}"
  clone       = var.template_name
  os_type     = "cloud-init"
  cpu         = "host"
  cores       = 4
  sockets     = 1
  memory      = 4096
  scsihw      = "virtio-scsi-pci"
  bootdisk    = "scsi0"
  disks {
    scsi {
      scsi0 {
        disk {
          size    = "50G"
          storage = "local-lvm"
        }
      }
    }
    ide {
      ide3 {
        cloudinit {
          storage = "local-lvm"
        }
      }
    }
  }
  network {
    model     = "virtio"
    bridge    = "vmbr0"
    tag       = -1
    firewall  = false
    link_down = false
  }
  # /24 matches the 255.255.255.0 netmask used by the template's network
  # config (the original /22 disagreed with the rest of this setup)
  ipconfig0 = "ip=192.168.61.2${count.index + 1}/24,gw=192.168.61.2"
  ciuser    = "terraform"
  sshkeys   = <<-EOF
    ${var.ssh_key}
  EOF
}
$ tree ../terraform
../terraform
├── backend.tf
├── main.tf
├── provider.tf
└── vars.tf
1 directory, 4 files
# Terraform only maps environment variables prefixed with TF_VAR_ onto input
# variables, so export TF_VAR_PROXMOX_API_SECRET to populate
# var.PROXMOX_API_SECRET — a bare PROXMOX_API_SECRET would be ignored and
# terraform would prompt for the value interactively.
$ export TF_VAR_PROXMOX_API_SECRET="1fb0b04e-5673-4d5c-b580-ae602c718d89"
# initialise the terraform working directory and upgrade provider plugins
$ terraform init -upgrade
# generate a terraform plan
$ terraform plan -out ckad.tfplan
# deploy the VMs (applying a saved plan never prompts for approval, so
# -auto-approve is unnecessary)
$ terraform apply -parallelism=1 ckad.tfplan
# cleanup
$ terraform plan -destroy -out ckad.tfplan
$ terraform apply "ckad.tfplan"