Group:
We are working in a Debian 11 VM.
Installing pip for Python 3
$ sudo apt install python3-pip
Installing Ansible
$ python3 -m pip install --user ansible
Updating the PATH environment variable
$ export PATH=$PATH:/home/hyperion/.local/bin
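To make this change persistent across sessions, the same export can be appended to the shell profile (a minimal sketch, assuming a Bash shell for the hyperion user):
$ echo 'export PATH=$PATH:/home/hyperion/.local/bin' >> ~/.bashrc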
Checking the Ansible version
$ ansible --version
ansible [core 2.14.0]
config file = None
configured module search path = ['/home/hyperion/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/hyperion/.local/lib/python3.9/site-packages/ansible
ansible collection location = /home/hyperion/.ansible/collections:/usr/share/ansible/collections
executable location = /home/hyperion/.local/bin/ansible
python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3)
jinja version = 3.1.2
libyaml = True
Installing the prerequisites
$ sudo apt install wget curl unzip software-properties-common gnupg2 -y
Adding the HashiCorp GPG key to the APT trusted keyring
$ curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
Adding the APT repository containing Terraform
$ sudo apt-add-repository "deb [arch=$(dpkg --print-architecture)] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
Updating the package lists and installing Terraform
$ sudo apt update
$ sudo apt install terraform -y
Checking the Terraform version
$ terraform -v
Terraform v1.3.5
on linux_amd64
Installing the Docker dependencies
$ sudo apt install ca-certificates curl gnupg lsb-release
Adding the Docker GPG key
$ sudo mkdir -p /etc/apt/keyrings
$ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
Adding the repository containing Docker
$ echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
Updating the package lists and installing Docker
$ sudo apt update
$ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
Adding the user account to the docker group
$ sudo groupadd docker
$ sudo usermod -aG docker $USER
# Restart session
$ newgrp docker
$ docker run hello-world
Unable to find image 'hello-world:latest' locally
latest: Pulling from library/hello-world
2db29710123e: Pull complete
Digest: sha256:faa03e786c97f07ef34423fccceeec2398ec8a5759259f94d99078f264e9d7af
Status: Downloaded newer image for hello-world:latest
Hello from Docker!
Terraform configuration file
terraform {
  required_providers {
    docker = {
      source  = "kreuzwerker/docker"
      version = "2.23.1"
    }
  }
}

provider "docker" {
  host = "unix:///var/run/docker.sock"
}

# Pulls the image
resource "docker_image" "ubuntu-sshd" {
  name = "takeyamajp/ubuntu-sshd"
}

resource "docker_network" "private_network" {
  name = "unilasalle"
}

# Create a container
resource "docker_container" "database" {
  image = docker_image.ubuntu-sshd.image_id
  name  = "database"

  ports {
    internal = "22"
    external = "2200"
  }

  networks_advanced {
    name = docker_network.private_network.name
  }
}

resource "docker_container" "webserver" {
  image = docker_image.ubuntu-sshd.image_id
  name  = "webserver"

  ports {
    internal = "22"
    external = "2201"
  }

  networks_advanced {
    name = docker_network.private_network.name
  }
}
Initializing the environment
$ terraform init
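Optionally, the configuration can be checked and the changes previewed before applying them:
$ terraform validate
$ terraform plan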
Applying the execution plan
$ terraform apply
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# docker_container.database will be created
+ resource "docker_container" "database" {
+ attach = false
+ bridge = (known after apply)
+ command = (known after apply)
+ container_logs = (known after apply)
+ container_read_refresh_timeout_milliseconds = 15000
+ entrypoint = (known after apply)
+ env = (known after apply)
+ exit_code = (known after apply)
+ gateway = (known after apply)
+ hostname = (known after apply)
+ id = (known after apply)
+ image = (known after apply)
+ init = (known after apply)
+ ip_address = (known after apply)
+ ip_prefix_length = (known after apply)
+ ipc_mode = (known after apply)
+ log_driver = (known after apply)
+ logs = false
+ must_run = true
+ name = "database"
+ network_data = (known after apply)
+ read_only = false
+ remove_volumes = true
+ restart = "no"
+ rm = false
+ runtime = (known after apply)
+ security_opts = (known after apply)
+ shm_size = (known after apply)
+ start = true
+ stdin_open = false
+ stop_signal = (known after apply)
+ stop_timeout = (known after apply)
+ tty = false
+ wait = false
+ wait_timeout = 60
+ healthcheck {
+ interval = (known after apply)
+ retries = (known after apply)
+ start_period = (known after apply)
+ test = (known after apply)
+ timeout = (known after apply)
}
+ labels {
+ label = (known after apply)
+ value = (known after apply)
}
+ networks_advanced {
+ aliases = []
+ name = "unilasalle"
}
+ ports {
+ external = 2200
+ internal = 22
+ ip = "0.0.0.0"
+ protocol = "tcp"
}
}
# docker_container.webserver will be created
+ resource "docker_container" "webserver" {
+ attach = false
+ bridge = (known after apply)
+ command = (known after apply)
+ container_logs = (known after apply)
+ container_read_refresh_timeout_milliseconds = 15000
+ entrypoint = (known after apply)
+ env = (known after apply)
+ exit_code = (known after apply)
+ gateway = (known after apply)
+ hostname = (known after apply)
+ id = (known after apply)
+ image = (known after apply)
+ init = (known after apply)
+ ip_address = (known after apply)
+ ip_prefix_length = (known after apply)
+ ipc_mode = (known after apply)
+ log_driver = (known after apply)
+ logs = false
+ must_run = true
+ name = "webserver"
+ network_data = (known after apply)
+ read_only = false
+ remove_volumes = true
+ restart = "no"
+ rm = false
+ runtime = (known after apply)
+ security_opts = (known after apply)
+ shm_size = (known after apply)
+ start = true
+ stdin_open = false
+ stop_signal = (known after apply)
+ stop_timeout = (known after apply)
+ tty = false
+ wait = false
+ wait_timeout = 60
+ healthcheck {
+ interval = (known after apply)
+ retries = (known after apply)
+ start_period = (known after apply)
+ test = (known after apply)
+ timeout = (known after apply)
}
+ labels {
+ label = (known after apply)
+ value = (known after apply)
}
+ networks_advanced {
+ aliases = []
+ name = "unilasalle"
}
+ ports {
+ external = 2201
+ internal = 22
+ ip = "0.0.0.0"
+ protocol = "tcp"
}
}
# docker_image.ubuntu-sshd will be created
+ resource "docker_image" "ubuntu-sshd" {
+ id = (known after apply)
+ image_id = (known after apply)
+ latest = (known after apply)
+ name = "takeyamajp/ubuntu-sshd"
+ output = (known after apply)
+ repo_digest = (known after apply)
}
# docker_network.private_network will be created
+ resource "docker_network" "private_network" {
+ driver = (known after apply)
+ id = (known after apply)
+ internal = (known after apply)
+ ipam_driver = "default"
+ name = "unilasalle"
+ options = (known after apply)
+ scope = (known after apply)
+ ipam_config {
+ aux_address = (known after apply)
+ gateway = (known after apply)
+ ip_range = (known after apply)
+ subnet = (known after apply)
}
}
Plan: 4 to add, 0 to change, 0 to destroy.
Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
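As a quick check, the created resources can be listed on the Docker host (a sketch; the output will vary):
$ terraform state list
$ docker ps --format 'table {{.Names}}\t{{.Ports}}'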
Testing the SSH connection to the containers
$ ssh -p 2200 10.0.3.30
$ ssh -p 2201 10.0.3.30
Ansible inventory file
db:
  hosts:
    database:
      ansible_port: 2200
      ansible_host: 10.0.3.30
      ansible_user: root
web:
  hosts:
    webserver:
      ansible_port: 2201
      ansible_host: 10.0.3.30
      ansible_user: root
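The inventory can be verified before use (assuming it is saved as inventory.yml):
$ ansible-inventory -i inventory.yml --graph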
Adding the entries to /etc/hosts
172.19.0.2 webserver
172.19.0.3 database
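The container addresses used above can be retrieved from Docker, for example (the actual addresses depend on the unilasalle network):
$ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' webserver
$ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' database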
Adding the SSH keys
$ ssh-copy-id root@webserver
$ ssh-copy-id root@database
Testing Ansible
$ ansible all -i inventory.yml -m ping
webserver | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
database | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
Ansible playbook to add a banner
---
- name: Install Banner
  hosts: all
  vars:
    ansible_python_interpreter: "/usr/bin/python3"
  tasks:
    - name: Copy Banner to server
      ansible.builtin.lineinfile:
        path: /etc/banner
        create: true
        regexp: "Authorized access only, server belongs to Unilasalle$"
        line: "Authorized access only, server belongs to Unilasalle 2"
    - name: Add banner in SSH Settings
      ansible.builtin.lineinfile:
        path: /etc/ssh/sshd_config
        regexp: '^#Banner none'
        line: 'Banner /etc/banner'
        owner: root
        group: root
        mode: '0644'
      notify:
        - Stop container
        - Start container
  handlers:
    - name: Stop container
      community.docker.docker_container:
        name: "{{ inventory_hostname }}"
        state: stopped
      delegate_to: 127.0.0.1
    - name: Start container
      community.docker.docker_container:
        name: "{{ inventory_hostname }}"
        state: started
      delegate_to: 127.0.0.1
For the container restart step, we needed to install an Ansible Galaxy collection:
$ ansible-galaxy collection install community.docker
We delegate to 127.0.0.1, otherwise the command would be executed on the remote machine instead of on the local server (the Docker host).
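The playbook can then be run against the inventory (a sketch; banner.yml is just the name we assume for the playbook file):
$ ansible-playbook -i inventory.yml banner.yml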
Ansible playbook (dynamic banner)
---
- name: Install Dynamic Banner
  hosts: all
  vars:
    ansible_python_interpreter: "/usr/bin/python3"
  tasks:
    - name: Copy Banner template to server
      ansible.builtin.template:
        src: template/banner
        dest: /etc/banner
        mode: '0755'
    - name: Add banner in SSH Settings
      ansible.builtin.lineinfile:
        path: /etc/ssh/sshd_config
        regexp: '^#Banner none'
        line: 'Banner /etc/banner'
        owner: root
        group: root
        mode: '0644'
      notify:
        - Stop container
        - Start container
  handlers:
    - name: Stop container
      community.docker.docker_container:
        name: "{{ inventory_hostname }}"
        state: stopped
      delegate_to: 127.0.0.1
    - name: Start container
      community.docker.docker_container:
        name: "{{ inventory_hostname }}"
        state: started
      delegate_to: 127.0.0.1
Template file
Hostname : {{ ansible_host }}
Groups : {{ group_names|join(', ') }}
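Rendered with the inventory above, the banner on the webserver host would look roughly like this:
Hostname : 10.0.3.30
Groups : web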
Ansible playbook (groups and users)
---
- name: Add group and users
  hosts: all
  tasks:
    - name: Add group to server
      ansible.builtin.group:
        name: "{{ item }}"
        state: present
      loop:
        - "supervision"
        - "applicative"
    - name: Add user to server and group
      ansible.builtin.user:
        name: "{{ item }}"
        group: "{{ item }}"
      loop:
        - "supervision"
        - "applicative"

- name: Add web user
  hosts: web
  tasks:
    - name: Add web user
      ansible.builtin.user:
        name: web
        group: applicative

- name: Add db user
  hosts: db
  tasks:
    - name: Add dbadmin user
      ansible.builtin.user:
        name: dbadmin
        group: applicative
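The created accounts can be checked with ad-hoc commands, for example:
$ ansible all -i inventory.yml -m ansible.builtin.command -a "id supervision"
$ ansible web -i inventory.yml -m ansible.builtin.command -a "id web"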
Ansible playbook (Apache and PHP)
---
- name: Install apache2
  hosts: web
  become: true
  tasks:
    - name: Update APT
      ansible.builtin.apt:
        update_cache: yes
    - name: Install Apache2
      ansible.builtin.apt:
        name: apache2
        state: present
    - name: Ensure apache2 is started
      ansible.builtin.service:
        state: started
        name: apache2
    - name: Enable apache2 service
      ansible.builtin.service:
        enabled: yes
        name: apache2

- name: Install PHP with PSQL
  hosts: web
  tasks:
    - name: Update APT
      ansible.builtin.apt:
        update_cache: yes
    - name: Install PHP
      ansible.builtin.apt:
        name: php
        state: present
    - name: Install PGSQL module for PHP
      ansible.builtin.apt:
        name: php-pgsql
        state: present
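Apache and PHP can then be checked from the containers, for example:
$ ansible web -i inventory.yml -m ansible.builtin.uri -a "url=http://localhost"
$ ansible web -i inventory.yml -m ansible.builtin.command -a "php -v"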
Ansible playbook (PostgreSQL)
---
- name: Install DB
  hosts: db
  become: true
  tasks:
    - name: Update APT
      ansible.builtin.apt:
        update_cache: yes
    - name: Install Postgres
      ansible.builtin.apt:
        name: "{{ item }}"
        state: present
      loop:
        - "postgresql"
    - name: Change port PSQL
      ansible.builtin.lineinfile:
        path: "/etc/postgresql/14/main/postgresql.conf"
        regexp: '^port = [0-9]+.*'
        line: 'port = 5431'
      notify:
        - Restart psql
  handlers:
    - name: Restart psql
      ansible.builtin.service:
        name: postgresql
        state: restarted
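The port change can be verified on the database host, for example:
$ ansible db -i inventory.yml -m ansible.builtin.command -a "pg_lsclusters"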