# CI/CD Training
---
```bash=
# Create runner.tf and paste in runner module readme contents
# Add the following right before outputs
# The module above requires an existing ECR repository
resource "aws_ecr_repository" "this" {
name = "demo-nonprod-dev-web-ecs"
}
# Modify Line 10 with the following
ecr_url = aws_ecr_repository.this.repository_url
# This overrides the dependency on the pipeline module by manually creating the ECR Repository now instead of during the pipeline deploy
# runner.tf should look like this
# =========================================================================================================
module "ecs-fargate-runner" {
source = "git::ssh://git@github.com/wcet-dope-aio/terraform-aws-fargate-runner?ref=v1.0.6.6"
vpc_id = data.terraform_remote_state.app.outputs.vpc_id
subnets_map = data.terraform_remote_state.env.outputs.subnet_ids
app_kms_key = data.terraform_remote_state.env.outputs.kms_key_arn
secret_map = data.terraform_remote_state.env.outputs.secrets
ecr_url = aws_ecr_repository.this.ecr_repository_url
alb_target_protocol = "HTTP"
alb_target_port = 8080
alb_healthcheck_url = "/index.html"
alb_idle_timeout = 120
alb_healthcheck_interval = 30
alb_healthcheck_timeout = 20
alb_healthcheck_healthy_threshold = 2
alb_healthcheck_unhealthy_threshold = 2
ecs_asg_min_capacity = 1
ecs_asg_max_capacity = 1
ecs_service_desired_count = 0
#enable_autoscaling = "true"
#create_alb = false
alb_insec_listeners = [
{
"listener on port 80" = "80,HTTP"
},
]
alb_sec_listeners = []
alb_sec_listeners_extra_certs = []
ecs_task_port_mappings = [
{
containerPort = 8080
hostPort = 8080
protocol = "tcp"
}
]
  # Define any plain text environment variables you need as shown below
  /*
  ecs_task_environment_vars = [
    { name = "ENV_VARIABLE_NAME", value = "ENV_VARIABLE_VALUE" },
    { name = "ENV_VARIABLE_NAME_2", value = "ENV_VARIABLE_VALUE_2" }
  ]
  */
  # Uncomment and configure this section in order to attach
  # shared EFS volumes to your containers
  /*
  shared_volumes = {
    "content" = {
      mount_point = "/srv/content"
    },
    "common-assets" = {
      mount_point = "/srv/common"
      file_system_id = "fs-1324abcd"
      read_only = true
    },
    "images" = {
      mount_point = "/srv/images"
      performance_mode = "maxIO"
      provisioned_throughput_in_mibps = 1000
    }
  }
  */
  # This module configures container logs to be sent to a CloudWatch log group
  # You can additionally configure those logs to be forwarded to a log stream
  # by uncommenting the line below
  #stream = data.terraform_remote_state.env.outputs.streams["ecs"]
  force_destroy = data.terraform_remote_state.app.outputs.force_destroy # app level >= v0.12.0.1
}
# The module above requires an existing ECR repository
resource "aws_ecr_repository" "this" {
name = "demo-nonprod-dev-web-ecs"
}
output "sgid_tier" { value = module.ecs-fargate-runner.sgid_tier }
output "sgid_alb" { value = module.ecs-fargate-runner.sgid_alb }
output "load_balancer_arn" { value = module.ecs-fargate-runner.load_balancer_arn }
output "load_balancer_dns_name" { value = module.ecs-fargate-runner.load_balancer_dns_name }
output "alb_target_group" { value = module.ecs-fargate-runner.alb_target_group }
output "ecs_service" { value = module.ecs-fargate-runner.ecs_service }
output "ecs_service_arn" { value = module.ecs-fargate-runner.ecs_service_arn }
output "ecs_cluster" { value = module.ecs-fargate-runner.ecs_cluster }
output "ecs_cluster_arn" { value = module.ecs-fargate-runner.ecs_cluster_arn }
output "managed_efs_volumes" { value = module.ecs-fargate-runner.managed_efs_volumes }
# =========================================================================================================
# This created the ECS Cluster, ECS Service, ECS Task Definition, ALB, and Autoscaling Groups.
# Now we need a container image to deploy to our cluster
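# Optional sanity check before moving on (a sketch; profile/region assumed to match the rest of this walkthrough)
aws --profile aws-aio-poc ecs list-clusters --region us-west-2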
# Clone ECS Sample to another folder
git clone git@github.com:wcet-dope-aio/app-wbtsaio-ecs-sample.git ~sacevedo/github-work/warnerBros/aio/repos/app-wbtsaio-ecs-sample/
# We're going to build this image and then push it to ECR. Make sure Docker is running locally
cd ~sacevedo/github-work/warnerBros/aio/repos/app-wbtsaio-ecs-sample/
cp -rp app-configs-sample app-configs
./build
./run-local
curl http://localhost:8080/
gimme-aws-creds -f aws-aio-poc
aws --profile aws-aio-poc ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 491440728227.dkr.ecr.us-west-2.amazonaws.com
# Login Succeeded!
# The tag/push target must match the ECR repo created in runner.tf
docker tag app-wbtsaio-ecs-sample:latest 491440728227.dkr.ecr.us-west-2.amazonaws.com/demo-nonprod-dev-web-ecs:latest
docker push 491440728227.dkr.ecr.us-west-2.amazonaws.com/demo-nonprod-dev-web-ecs:latest
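# Optional: confirm the image landed in ECR (same profile/region as the login above)
aws --profile aws-aio-poc ecr list-images --region us-west-2 --repository-name demo-nonprod-dev-web-ecs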
# Add SG Rules
vim tier-sg-rules.tf
# =========================================================================================================
module "sg_rules" {
source = "git::ssh://git@github.com/wcet-dope-aio/terraform-aws-sg-rules?ref=v2.0.1.0"
security_group_rules = [
# Ingress into ECS Cluster
[module.ecs-fargate-runner.sgid_tier, "ingress", "tcp", "8080", module.ecs-fargate-runner.sgid_alb, "Allow Ingress from ALB on 8080"],
# Egress out of ECS Cluster
[module.ecs-fargate-runner.sgid_tier, "egress", "tcp", "443", "0.0.0.0/0", "When Fargate launches the task, it needs to pull an image from ECR. An outbound request is made by the Fargate ENI on port 443. That request would be dropped without this rule"],
# Egress out of the ALB
[module.ecs-fargate-runner.sgid_alb, "egress", "tcp", "8080", module.ecs-fargate-runner.sgid_tier, "Allow ALB to Talk to ECS Tasks"]
]
vpc_id = data.terraform_remote_state.app.outputs.vpc_id
}
resource "aws_security_group_rule" "gp" {
description = "Allow inbound web traffic from global protect"
from_port = 80
security_group_id = module.ecs-fargate-runner.sgid_alb
protocol = "tcp"
to_port = 80
type = "ingress"
# Look up the correct 'VPC Customer Managed Prefix List' id for 'Global VPN Earth'
# in your AWS account containing Global Protect VPN endpoints.
prefix_list_ids = ["pl-0e439b029f734a574"] # MAY BE DIFFERENT IN YOUR AWS ACCOUNT!
}
# =========================================================================================================
# Modify ecs_service_desired_count in runner.tf from ZERO to ONE and uncomment enable_autoscaling, then apply
ecs_service_desired_count = 1
enable_autoscaling = "true"
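terraform apply
# Optional: watch the service come up. Cluster/service names below are placeholders;
# take the real ones from the ecs_cluster and ecs_service outputs
aws --profile aws-aio-poc ecs describe-services --region us-west-2 \
  --cluster <ecs-cluster-name> --services <ecs-service-name> \
  --query 'services[0].{desired:desiredCount,running:runningCount}'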
# =========================================================================================================
# Now, on VPN, we can hit the following (taken from the load_balancer_dns_name output)
demo-nonprod-dev-web-ecs-alb-491131607.us-west-2.elb.amazonaws.com
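# Quick check from a VPN-connected machine (your ALB DNS name will differ)
curl -I http://demo-nonprod-dev-web-ecs-alb-491131607.us-west-2.elb.amazonaws.com/index.html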
# =========================================================================================================
# Create pipeline.tf and paste in pipeline module readme contents. Save file
# Notice, the module requires you to pass in a secret that contains a Slack webhook URL. We will go back to the environment level to create this
# Add the following to the environment tf file env.tf
secrets = [
  {
    name = "slack-webhook-url"
    description = "The secret is for the webhook url used to post messages to a slack channel. Used in the web tier and the db tier"
  },
]
# =========================================================================================================
# env.tf should now look like
# =========================================================================================================
module "env" {
source = "git::ssh://git@github.com/wcet-dope-aio/terraform-aws-env-level.git?ref=v1.0.4.4"
ami_names = ["al2022-ami-minimal-2022.0.20221103.1-kernel-5.15-x86_64"]
subnet_map = data.terraform_remote_state.app.outputs.subnet_map
vpc_id = data.terraform_remote_state.app.outputs.vpc_id
vpc_igw_id = data.terraform_remote_state.app.outputs.vpc_igw_id
secrets = [
{
name = "slack-webhook-url"
description = "The secret is for the webhook url used to post messages to a slack channel. Used in the web tier and the db tier"
},
]
}
output "appname" { value = "${module.env.appname}" }
output "environment" { value = module.env.environment }
output "subnet_ids" { value = module.env.subnet_ids }
output "route_table_ids" { value = module.env.route_table_ids}
output "vpc_endpoint_s3" { value = module.env.vpc_endpoint_s3 }
output "nat_gateway_az_a" { value = module.env.nat_gateway_az_a }
output "nat_gateway_az_b" { value = module.env.nat_gateway_az_b }
output "encrypted_ami_names" { value = module.env.encrypted_ami_names }
output "kms_key_arn" { value = module.env.kms_key_arn }
output "secrets" { value = module.env.secrets }
output "parameters" { value = module.env.parameters }
output "lipr_outpost" { value = module.env.lipr_outpost }
output "streams" { value = module.env.streams }
# Uncomment this if you need to expose the
# LIPR Lambda functions (not common)
#output "lipr_outpost_lambda_functions" { value = module.env.lipr_outpost_lambda_functions }
# =========================================================================================================
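# Apply at the environment level so the new secret exists before populating it
# (run from your env-level working directory; an assumed step, adjust to your layout)
terraform apply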
secrets-helper.py list
secrets-helper.py update slack-webhook-url
# Look up the Slack webhook URL for your channel and paste it in when prompted
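# Optional: confirm the secret was updated (the secret name/ARN is account-specific;
# take it from the secrets output or from secrets-helper.py list)
aws --profile aws-aio-poc secretsmanager describe-secret --region us-west-2 --secret-id <slack-webhook-url-secret-arn> --query LastChangedDate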
# Next we have to make Terraform "forget" about the ECR repository without deleting it. This is in preparation for setting up a CI/CD pipeline,
# because our "pipeline" terraform module wants to manage the ECR resource itself.
# Modify runner.tf: comment out the existing `ecr_url` line and add a new line under it:
ecr_url = "491440728227.dkr.ecr.us-west-2.amazonaws.com/demo-nonprod-dev-web-ecs"
# Then also comment out the definition of an ECR below the module:
#
# The module above requires an existing ECR repository
#resource "aws_ecr_repository" "this" {
# name = "demo-nonprod-dev-web-ecs"
#}
# Then we need to make terraform "forget" about the ECR; we don't want it to delete it!
terraform plan # check to see if it shows only the ECR being destroyed
# Remove the ECR from state
terraform state rm "aws_ecr_repository.this"
terraform apply # should run clean
# Now import the ECR repository back into Terraform state, but under a different resource address
terraform import "module.pipeline.aws_ecr_repository.this" "demo-nonprod-dev-web-ecs"
terraform apply
# This creates an S3 bucket to hold app config and app source, a CodeBuild project, the ECR repository (the one we imported),
# some CloudWatch Events rules for building/deploying new images as they appear in ECR, and some helper scripts.
#
# We have the pipeline infrastructure, but aren't finished setting it up.
#
# 1. Set up a GitHub Action in the app repository so that, on push, it copies the source code to our new S3 build bucket
# 2. Run the action to upload the repository to S3
# 3. Test the pipeline by running the deploy-version.sh helper script
#
# 1. Go to the AWS Console for the aws-aio-poc account, IAM, Users, <app-env-tier-github>, security credentials, and create a new access key. Copy down the access key ID and secret access key.
# 1b. Go to the demo repo, settings, and add a new environment. Call it "dev-<USERNAME>" and add two environment variables to it:
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
#
# Populate those variables accordingly.
#
# cd to your checkout of app-wbtsaio-ecs-sample and create:
cd <app-wbtsaio-ecs-sample>
mkdir -p .github/workflows
cp </path/to/github-workflow-dev.yml> .github/workflows/dev-$(whoami).yml
git add .github/workflows/dev-$(whoami).yml
git commit -m "Added github workflow"
git push
# This should cause our new action to run. Go to the github repo, actions, view the action output if you are curious.
# This should upload a source.bundle and version.txt file to your S3 build bucket.
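# Optional: confirm the upload (the bucket name is account-specific; see the pipeline module outputs or the S3 console)
aws --profile aws-aio-poc s3 ls s3://<build-bucket>/app-source/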
#
# Now go to your app repo and copy the app-configs-sample folder to a new folder in your web tier called app-configs
cp -r app-configs-sample /path/to/web-ecs/app-configs
cd /path/to/web-ecs
terraform apply # should show files being uploaded to S3, including a Dockerfile
```
# 4: Test various deployment workflows
```bash=
# By default, the pipeline infrastructure is not configured to automatically build and deploy when code is pushed to GitHub
# so let's manually trigger a build/deploy and verify the app "works"
- MANUAL: trigger the build/deploy by hand (see the sketch after the flow notes below)
  - cli
  - console
- AUTOMATIC: trigger from github
  - enable autobuild in pipeline.tf and run terraform apply
  - make a change to source code and push
- AUTOMATIC: trigger from github but only build certain branches
  - enable autobuild
  - populate allowed_versions in pipeline.tf and run terraform apply
  - make a change to source code and push
```
```
autobuild = false
github push --> action runs and creates S3://<build-bucket>/app-source/version.txt and S3://<build-bucket>/app-source/source.bundle --> That's it.

autobuild = true
github push -->
action runs and creates S3://<build-bucket>/app-source/version.txt and S3://<build-bucket>/app-source/source.bundle -->
CloudWatch Event sees that app-source/version.txt was changed and runs a lambda function -->
lambda copies S3://<build-bucket>/app-source/version.txt to S3://<build-bucket>/app-version-deployed/version.txt -->
CloudWatch Event sees that app-version-deployed/version.txt was changed and calls CodeBuild -->
CodeBuild copies app-version-deployed/version.txt and app-source/source.bundle, checks out the commit/tag/branch defined in version.txt, calls docker build, and pushes the final image to ECR -->
CloudWatch Event sees the new image in ECR and triggers a deployment against the ECS Service

autobuild = true, allowed_versions = "/ref/remotes/dev"
The only difference here is that the lambda that copies app-source/version.txt to app-version-deployed/version.txt first checks whether the environment variable "ALLOWED_VERSIONS" is a regex match against the contents of app-source/version.txt. If not, nothing happens. If it is, the lambda copies the file as before to app-version-deployed/version.txt, which causes the rest of the workflow to continue.
```
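For the manual CLI trigger in the list above, here is a minimal sketch of what the trigger amounts to, based on the flow notes: with `autobuild = false` nothing watches `app-source/version.txt`, so a deploy means getting `version.txt` into the `app-version-deployed/` prefix yourself. The bucket name and profile below are placeholders, and this assumes the CloudWatch Event on `app-version-deployed/version.txt` exists regardless of the autobuild setting; the pipeline module's `deploy-version.sh` helper script mentioned earlier is the supported way to do this.
```bash=
# Hypothetical manual trigger: copy the version marker into the "deployed"
# prefix so the CloudWatch Event kicks off CodeBuild, which builds the image,
# pushes it to ECR, and triggers the ECS deployment as described above.
aws --profile aws-aio-poc s3 cp \
  s3://<build-bucket>/app-source/version.txt \
  s3://<build-bucket>/app-version-deployed/version.txt
```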