# OpenStack Adv. Networking Homework
## Prerequisites
```bash
# Install necessary packages
yum install git vim zsh tmux ansible -y
git clone https://github.com/pichuang/momorc ~/.momorc && cd ~/.momorc
./install.sh
# Switch to tmux
tmux
# Generate SSH Key
ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -q -N ""
export PUBLIC_KEY="$(cat ~/.ssh/id_rsa.pub)"
echo $PUBLIC_KEY
# Clone Ansible Skeleton
cd ~/
git clone https://github.com/pichuang/ansible-skeleton bastion && cd ~/bastion
# Write the inventory file
cat << EOF > hosts
[osp:children]
ctrl-grp
storage-grp
compute-grp
network-grp
[osp:vars]
ansible_ssh_user=root
ansible_ssh_pass=r3dh4t1!
ansible_ssh_connection=ssh
public_key="PLACE_YOUR_PUBLIC_KEY_IN_HERE"
[ctrl-grp]
ctrl ansible_host=192.168.0.20
[storage-grp]
storage ansible_host=192.168.0.40
[compute-grp]
comp00 ansible_host=192.168.0.30
comp01 ansible_host=192.168.0.31
[network-grp]
net00 ansible_host=192.168.0.50
net01 ansible_host=192.168.0.51
net02 ansible_host=192.168.0.52
EOF
# Substitute the public key into the inventory
sed -Ei "s|PLACE_YOUR_PUBLIC_KEY_IN_HERE|$PUBLIC_KEY|g" hosts
# Deploy SSH Key
# Password: r3dh4t1!
ansible-playbook -i hosts ./utils/deploy_root_sshkey.yml
# Validate
ansible -i hosts all -m ping
```
## 1. [x] On the [network] and [compute] nodes, define interface configuration files for the following devices:
- `br-vlan` as an OVS bridge
- `bond0` as an OVS bond device connected to the `br-vlan` OVS bridge
- `eth1` and `eth2` as slaves for `bond0`
- For the compute and network nodes (OVS bridge):
```bash
mkdir -p network-scripts/compute/
# Prepare configuration
cat << EOF > network-scripts/compute/ifcfg-eth1
DEVICE=eth1
TYPE=Ethernet
ONBOOT=yes
USERCTL=no
EOF
cat << EOF > network-scripts/compute/ifcfg-eth2
DEVICE=eth2
TYPE=Ethernet
ONBOOT=yes
USERCTL=no
EOF
cat << EOF > network-scripts/compute/ifcfg-bond0
DEVICE="bond0"
ONBOOT="yes"
DEVICETYPE="ovs"
TYPE="OVSBond"
OVS_BRIDGE="br-vlan"
BOND_IFACES="eth1 eth2"
#OVS_OPTIONS="bond_mode=balance-tcp lacp=active"
EOF
cat << EOF > network-scripts/compute/ifcfg-br-vlan
DEVICE="br-vlan"
ONBOOT="yes"
DEVICETYPE="ovs"
TYPE="OVSBridge"
OVSBOOTPROTO="none"
HOTPLUG="no"
EOF
# Copy the configuration files to the compute and network groups
ansible compute-grp,network-grp -m copy -a "src=network-scripts/compute/ifcfg-eth1 dest=/etc/sysconfig/network-scripts/ifcfg-eth1"
ansible compute-grp,network-grp -m copy -a "src=network-scripts/compute/ifcfg-eth2 dest=/etc/sysconfig/network-scripts/ifcfg-eth2"
ansible compute-grp,network-grp -m copy -a "src=network-scripts/compute/ifcfg-bond0 dest=/etc/sysconfig/network-scripts/ifcfg-bond0"
ansible compute-grp,network-grp -m copy -a "src=network-scripts/compute/ifcfg-br-vlan dest=/etc/sysconfig/network-scripts/ifcfg-br-vlan"
# Turn on all interfaces
ansible compute-grp,network-grp -m shell -a "ifup eth1"
ansible compute-grp,network-grp -m shell -a "ifup eth2"
ansible compute-grp,network-grp -m shell -a "ifup br-vlan"
ansible compute-grp,network-grp -m shell -a "ifup bond0"
# Review
ansible compute-grp,network-grp -m shell -a "ovs-appctl bond/show bond0"
ansible compute-grp,network-grp -m shell -a "ovs-vsctl show"
ansible compute-grp,network-grp -m shell -a "ovs-vsctl list-br"
```
- For the storage node (Linux kernel bond, no OVS):
```bash
mkdir -p network-scripts/storage
cat << EOF > network-scripts/storage/ifcfg-eth1
DEVICE=eth1
TYPE=Ethernet
ONBOOT=yes
USERCTL=no
MASTER=bond0
SLAVE=yes
EOF
cat << EOF > network-scripts/storage/ifcfg-eth2
DEVICE=eth2
TYPE=Ethernet
ONBOOT=yes
USERCTL=no
MASTER=bond0
SLAVE=yes
EOF
cat << EOF > network-scripts/storage/ifcfg-bond0
DEVICE=bond0
TYPE=Bond
NAME=bond0
BONDING_MASTER=yes
BOOTPROTO=none
ONBOOT=yes
BONDING_OPTS="mode=1 miimon=100"
EOF
# Copy the configuration files to the storage group
ansible storage-grp -m copy -a "src=network-scripts/storage/ifcfg-eth1 dest=/etc/sysconfig/network-scripts/ifcfg-eth1"
ansible storage-grp -m copy -a "src=network-scripts/storage/ifcfg-eth2 dest=/etc/sysconfig/network-scripts/ifcfg-eth2"
ansible storage-grp -m copy -a "src=network-scripts/storage/ifcfg-bond0 dest=/etc/sysconfig/network-scripts/ifcfg-bond0"
# Turn on all interfaces
ansible storage-grp -m shell -a "ifup bond0"
ansible storage-grp -m shell -a "ifup eth1"
ansible storage-grp -m shell -a "ifup eth2"
ansible storage-grp -m shell -a "cat /proc/net/bonding/bond0"
```
## 2. [x] Reconfigure OpenStack to use VLAN 5 on `bond0` as a data network for VXLAN tunnels.
```bash
# Prepare configuration
mkdir -p network-scripts/vlan5/
cat > network-scripts/vlan5/ifcfg-vlan5 << EOF
DEVICE="vlan5"
ONBOOT="yes"
DEVICETYPE="ovs"
TYPE="OVSIntPort"
OVS_BRIDGE="br-vlan"
OVS_OPTIONS="tag=5"
OVSBOOTPROTO="none"
# NOTE: this address must be unique per host; see the per-host sketch after this block
IPADDR=172.16.5.30
PREFIX=24
HOTPLUG="no"
EOF
# Copy the configuration to the compute and network groups (then fix up the per-host IPADDR, see below)
ansible compute-grp,network-grp -m copy -a "src=network-scripts/vlan5/ifcfg-vlan5 dest=/etc/sysconfig/network-scripts/ifcfg-vlan5"
ansible compute-grp,network-grp -m shell -a "ifup vlan5"
# Review
ansible compute-grp,network-grp -m shell -a "ovs-vsctl show"
ansible compute-grp,network-grp -m shell -a "ovs-vsctl list-br"
```
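Copying an identical `ifcfg-vlan5` to every node duplicates the `172.16.5.30` address. A minimal per-host fix-up sketch; the host-to-IP mapping below is an assumption based on the inventory, adjust it to your addressing plan:
```bash
# Patch each node's vlan5 address so no two hosts collide (assumed mapping)
for entry in comp00:172.16.5.30 comp01:172.16.5.31 net00:172.16.5.50 net01:172.16.5.51 net02:172.16.5.52; do
  host="${entry%%:*}"; ip="${entry##*:}"
  ansible "$host" -m shell -a "sed -i 's|^IPADDR=.*|IPADDR=$ip|' /etc/sysconfig/network-scripts/ifcfg-vlan5 && ifdown vlan5; ifup vlan5"
done
```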
- Verify that the configuration persists across a reboot
```bash
# Reboot every node; once they are back up, re-run the checks below
ansible all -m shell -a "shutdown -r now"
# Review
ansible compute-grp,network-grp -m shell -a "ovs-vsctl show"
ansible compute-grp,network-grp -m shell -a "ovs-vsctl list-br"
```
## 3. Reconfigure OpenStack to use VLAN as the default tenant network type.
The `crudini` commands for this step appear in the command log under step 11 below.
## 4. Reconfigure OpenStack to use L3 HA routers by default.
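There is no command log for this step in the note; a minimal sketch following the same `crudini` pattern (option names are from stock Neutron, verify against your release):
```bash
# Create new routers as HA (VRRP) routers by default
ansible ctrl-grp -m shell -a "crudini --set /etc/neutron/neutron.conf DEFAULT l3_ha True"
# Schedule each HA router on up to 3 L3 agents
ansible ctrl-grp -m shell -a "crudini --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 3"
ansible ctrl-grp -m shell -a "systemctl restart neutron-server"
# Confirm
ansible ctrl-grp -m shell -a "crudini --get /etc/neutron/neutron.conf DEFAULT l3_ha"
```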
## 5. Configure `iptables` to allow traffic to the new subnet.
```bash
# In control node
# Obtain default security group ID
sg_id=$(openstack security group list --project admin -c ID -f value)
# Enable SSH Traffic
openstack security group rule create --dst-port 22 $sg_id
# Enable ICMP Traffic
openstack security group rule create --protocol icmp $sg_id
```
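Security-group rules are rendered as iptables rules on the compute nodes (with the `iptables_hybrid` firewall driver), which is what ties this step to its title. A rough spot check, assuming that driver is in use:
```bash
# Look for the generated SSH/ICMP accept rules in the neutron-managed chains
ansible compute-grp -m shell -a "iptables-save | grep neutron | grep -E 'dport 22|icmp'"
```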
## 6. After making the reconfigurations described above, collect copies of the following:
- Configuration files `/etc/sysconfig/network-scripts/ifcfg-*`
- Output of the `ovs-vsctl show` command from [network] and [compute] nodes
- Affected Neutron configuration files (a collection sketch follows this list)
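A minimal collection sketch using Ansible's `fetch` module (the `step6/` layout is an assumption):
```bash
mkdir -p step6
# Bundle the interface files on every node, then pull the bundles back per-host
ansible osp -m shell -a "tar czf /tmp/ifcfg.tgz /etc/sysconfig/network-scripts/ifcfg-*"
ansible osp -m fetch -a "src=/tmp/ifcfg.tgz dest=step6/"
# Record the OVS layout on the network and compute nodes
ansible compute-grp,network-grp -m shell -a "ovs-vsctl show" | tee step6/ovs-vsctl-show.txt
```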
## 7. [x] Create a new external network `ext-net` with subnet `ext-subnet`.
- [x] As a preliminary step, remove any other external networks if they exist in the environment (a cleanup sketch follows this list).
- [x] The `ext-subnet` should have network address `10.0.0.0/24` and gateway `10.0.0.1`.
- [x] Collect the output of the `neutron net-show ext-net` and `neutron subnet-show ext-subnet` commands.
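The cleanup of pre-existing external networks is not in the command log below; a minimal sketch, run as admin on the control node (networks still attached to routers or ports must be detached first):
```bash
# Delete every external network except ext-net itself
openstack network list --external -f value -c Name \
  | grep -vx ext-net \
  | xargs -r -n1 openstack network delete
```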
## 8. Create tenant networks and subnets:
- [x] Create two tenants, A and B.
- For each tenant, create its tenant network using the default network type.
- Both tenant networks should have the subnet `192.168.2.0/24` and routers connected to the external network (a sketch for these follows the listing below).
- Collect the output of `openstack project list`, `openstack network list`, and `openstack subnet list`.
```
[root@ctrl-68d9 ~(keystone_admin)]# openstack project list
+----------------------------------+-----------+
| ID | Name |
+----------------------------------+-----------+
| 41d2e6233f1f48ae913104a6a0564194 | project-b |
| be577b7b668844be8d3abbd75e4efd75 | services |
| ddd1b53fd1924273980917d2f31c6840 | admin |
| f30a8b7e34e74f0794662f73cf09661c | project-a |
+----------------------------------+-----------+
[root@ctrl-68d9 ~(keystone_admin)]# openstack network list
+--------------------------------------+-------------+--------------------------------------+
| ID | Name | Subnets |
+--------------------------------------+-------------+--------------------------------------+
| 3397e582-9830-4649-9df8-7e605c439514 | ext-net | 33e3ca41-5a31-4fdb-b65c-16c806e64eb3 |
| 4aa5a443-ade5-4aba-acad-e8e735821683 | lb-mgmt-net | dc11bae9-84c6-4111-9abc-135a7c2fa48b |
| aeee9306-88c2-464c-94ba-6e89bf2c5971 | vx-net | 7364086b-52e3-4eff-9a88-25eb596f9fcd |
+--------------------------------------+-------------+--------------------------------------+
[root@ctrl-68d9 ~(keystone_admin)]# openstack subnet list
+--------------------------------------+----------------+--------------------------------------+----------------+
| ID | Name | Network | Subnet |
+--------------------------------------+----------------+--------------------------------------+----------------+
| 33e3ca41-5a31-4fdb-b65c-16c806e64eb3 | ext-subnet | 3397e582-9830-4649-9df8-7e605c439514 | 10.0.0.0/24 |
| 7364086b-52e3-4eff-9a88-25eb596f9fcd | vx-subnet | aeee9306-88c2-464c-94ba-6e89bf2c5971 | 172.16.5.0/24 |
| dc11bae9-84c6-4111-9abc-135a7c2fa48b | lb-mgmt-subnet | 4aa5a443-ade5-4aba-acad-e8e735821683 | 172.16.20.0/24 |
+--------------------------------------+----------------+--------------------------------------+----------------+
```
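The per-tenant networks and routers from this step do not appear in the listing above; a minimal sketch (resource names are assumptions), run as admin on the control node:
```bash
# One network, subnet, and router per tenant, each routed to ext-net
for proj in project-a project-b; do
  openstack network create net-$proj --project $proj
  openstack subnet create subnet-$proj --project $proj --network net-$proj --subnet-range 192.168.2.0/24
  openstack router create router-$proj --project $proj
  openstack router set router-$proj --external-gateway ext-net
  openstack router add subnet router-$proj subnet-$proj
done
```
The two `192.168.2.0/24` subnets do not conflict because each sits behind its own tenant router.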
## 9. Start two instances, one per tenant.
- For tenant A, allocate a floating IP and assign it to the instance (a sketch follows this list).
- Collect output of the `openstack server list` command.
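A minimal sketch for the floating IP; the instance name `vm-a` is an assumption, substitute your tenant A server:
```bash
# Allocate a floating IP from ext-net and attach it to tenant A's instance
fip=$(openstack floating ip create ext-net -c floating_ip_address -f value)
openstack server add floating ip vm-a "$fip"
openstack server list
```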
## 10. Check connectivity to the assigned floating IP.
- Collect output of the `ping` command from the [storage] node, workstation, and from the instance of tenant B.
- Capture ICMP echo request and reply frames on the external interfaces of the routers belonging to tenants A and B while running `ping` from the instance of tenant B (a capture sketch follows).
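A capture sketch for a network node; the router's external leg is the `qg-` port inside its `qrouter` namespace (the router ID and port suffix below are hypothetical placeholders):
```bash
# Find the router namespace and its qg- (external gateway) port
ip netns list | grep qrouter
router_ns=qrouter-<router-id>   # hypothetical; substitute the real router ID
ip netns exec $router_ns ip -o link show | grep qg-
# Capture only ICMP echo request/reply frames on that port
ip netns exec $router_ns tcpdump -ni qg-XXXXXXXX 'icmp[icmptype] = icmp-echo or icmp[icmptype] = icmp-echoreply'
```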
## 11. Implement a shared network for tenants (projects) A and B.
- [x] The network type should be VXLAN.
- [x] The network should be named `vx-net`.
- [x] Create subnet `vx-subnet` with network address `172.16.5.0/24`.
- [x] Allow both tenants to use the network (an RBAC sketch follows the command log below).
- [x] Collect output of the `openstack network show vx-net` and `openstack subnet show vx-subnet` commands.
- [x] Create a router connected to `vx-net` and `ext-net`.
- [x] Create two instances, one for tenant A and another for tenant B.
- [x] Attach the instances to the `vx-net`.
- [x] Assign floating IP addresses to the instances.
- [x] Check connectivity with the instances from the workstation.
- [x] Collect output of the `ping` command.
```bash
# Enable VXLAN Network Type
## Check current network type
ansible ctrl-grp -m shell -a "crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers"
ansible ctrl-grp -m shell -a "crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vlan,vxlan,flat"
# (Task 3) Change the default tenant network type from vxlan to vlan
ansible ctrl-grp -m shell -a "crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types"
ansible ctrl-grp -m shell -a "crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vlan"
# Set the usable VLAN ID range for the datacentre physical network (1024-3096 here)
ansible ctrl-grp -m shell -a "crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vlan network_vlan_ranges datacentre:1024:3096"
ansible ctrl-grp -m shell -a "crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vlan network_vlan_ranges"
# Restart neutron
ansible ctrl-grp -m shell -a "systemctl restart neutron-server"
# Change tunnel types preconfigured on the compute and network nodes
ansible compute-grp,network-grp -m shell -a "crudini --get /etc/neutron/plugins/ml2/openvswitch_agent.ini agent tunnel_types"
ansible compute-grp,network-grp -m shell -a "crudini --set /etc/neutron/plugins/ml2/openvswitch_agent.ini agent tunnel_types vxlan"
# Map each physical network to its OVS bridge (extnet -> br-ex, vx-net -> br-vlan):
ansible compute-grp,network-grp -m shell -a "crudini --get /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs bridge_mappings"
ansible compute-grp,network-grp -m shell -a "crudini --set /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs bridge_mappings extnet:br-ex,vx-net:br-vlan"
ansible compute-grp,network-grp -m shell -a "systemctl restart neutron-openvswitch-agent"
ansible compute-grp,network-grp -m shell -a "ovs-vsctl show"
# Show bridge mappings
ansible network-grp -m shell -a "crudini --get /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs bridge_mappings"
# Create a provider network ext-net and its subnet ext-subnet
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack network create ext-net --external --provider-network-type flat --provider-physical-network extnet"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack subnet create ext-subnet --network ext-net --gateway 10.0.0.1 --no-dhcp --subnet-range 10.0.0.0/24 --allocation-pool start=10.0.0.64,end=10.0.0.128"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && neutron net-show ext-net"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && neutron subnet-show ext-subnet"
# Create the internal network vx-net and its subnet
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack network create vx-net"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack subnet create vx-subnet --network vx-net --subnet-range 172.16.5.0/24"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack network show vx-net"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack subnet show vx-subnet"
# Create a router
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack router create router1"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack router set router1 --external-gateway ext-net"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack router add subnet router1 vx-subnet"
# Create tenant networks and subnets
## Create Tenant A
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack project create --domain default --description \"A Project\" project-a"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack role add --project project-a --user admin admin"
## Create Tenant B
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack project create --domain default --description \"B Project\" project-b"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack role add --project project-b --user admin admin"
# Download the image
ansible ctrl-grp -m shell -a "curl -O http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img"
ansible ctrl-grp -m shell -a "source ~/keystonerc_admin && openstack image create cirros --public --disk-format qcow2 --file cirros-0.4.0-x86_64-disk.img"
# Enable SSH/ICMP access in the default security group (already done in step 5; kept here for reference)
# sg_id=$(openstack security group list --project admin -c ID -f value)
# openstack security group rule create --dst-port 22 $sg_id
# openstack security group rule create --protocol icmp $sg_id
# Create vm1 in project-a
# Run on the control node with keystonerc_admin sourced; scope the project first
# (e.g. export OS_PROJECT_NAME=project-a) so the server lands in project-a rather than admin
sg_id=$(openstack security group list --project project-a -c ID -f value)
openstack server create --image cirros --flavor m1.tiny --security-group $sg_id --nic net-id=vx-net vm1
# Create vm2 in project-b
sg_id=$(openstack security group list --project project-b -c ID -f value)
openstack server create --image cirros --flavor m1.tiny --security-group $sg_id --nic net-id=vx-net vm2
# Show all vms
openstack server list
```
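The "allow both tenants to use the network" item is not in the log above; a minimal sketch using Neutron RBAC, run as admin:
```bash
# Grant each project shared access to vx-net
for proj in project-a project-b; do
  openstack network rbac create --target-project $proj --action access_as_shared --type network vx-net
done
```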
## 12. Send your instructor all of the files you collected for this project:
- Organize files into directories `step1`, `step2`, etc.
- Create a `tar` archive using your last name as the filename and include all of your collected files (a packaging sketch follows).
- Include any comments in the `README` file inside the archive.
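A minimal packaging sketch; `lastname` and the step directories are placeholders:
```bash
lastname=yourlastname   # hypothetical; use your own last name
# One directory per step, each holding that step's collected files
tar -czf "${lastname}.tar.gz" step*/ README
```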