# nftables
###### tags: `linux` `network` `nftables`
### network
```
+-----------------+
| |
| Gateway |
| |
+-----------------+ +--------------------+ +--------------------+
|192.168.122.1 | | | |
| | Firewall | | Server |
+-----------------+ 222 +---------------+ ssh 22 |
| 8080 | | web 80 |
+--------------------+ +--------------------+
192.168.122.151 172.16.0.1 172.16.0.2
```
### list table, chain and rules
```
root@firewall:/home/ycheng/nftables# nft list tables
table ip filter
root@firewall:/home/ycheng/nftables# nft list chains
table ip filter {
chain input {
type filter hook input priority filter; policy accept;
}
}
root@firewall:/home/ycheng/nftables# nft list ruleset
table ip filter {
chain input {
type filter hook input priority filter; policy accept;
tcp dport 22 accept
}
}
root@firewall:/home/ycheng/nftables# nft list chains -a
table ip filter {
chain input { # handle 1
type filter hook input priority filter; policy accept;
}
}
root@firewall:/home/ycheng/nftables# nft list ruleset -a
table ip filter { # handle 7
chain input { # handle 1
type filter hook input priority filter; policy accept;
tcp dport 22 accept # handle 2
}
}
```
### source NAT
```
root@firewall:/home/ycheng/nftables# echo 1 > /proc/sys/net/ipv4/ip_forward
root@firewall:/home/ycheng/nftables# ip addr add 172.16.0.1/24 dev ens8
root@firewall:/home/ycheng/nftables# ip link set dev ens8 up
root@firewall:/home/ycheng/nftables# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: ens3: mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:85:66:da brd ff:ff:ff:ff:ff:ff
inet 192.168.122.151/24 brd 192.168.122.255 scope global dynamic ens3
valid_lft 2412sec preferred_lft 2412sec
3: ens8: mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 52:54:00:38:46:8b brd ff:ff:ff:ff:ff:ff
inet 172.16.0.1/24 scope global ens8
valid_lft forever preferred_lft forever
root@firewall:/home/ycheng/nftables# nft add table nat
root@firewall:/home/ycheng/nftables# nft 'add chain nat post-routing { type nat hook postrouting priority 100; policy accept; }'
root@firewall:/home/ycheng/nftables# nft add rule nat post-routing ip saddr 172.16.0.0/24 oif ens3 snat 192.168.122.151
root@firewall:/home/ycheng/nftables# nft list ruleset
table ip nat {
chain post-routing {
type nat hook postrouting priority srcnat; policy accept;
ip saddr 172.16.0.0/24 oif "ens3" snat to 192.168.122.151
}
}
```
### destination NAT
```
# Set default policy to accept in this pre-routing chain if you have source NAT in use.
root@firewall:/home/ycheng/nftables# nft 'add chain nat pre-routing { type nat hook prerouting priority -100; policy accept; }'
root@firewall:/home/ycheng/nftables# nft list chains
table ip nat {
chain post-routing {
type nat hook postrouting priority srcnat; policy accept;
}
chain pre-routing {
type nat hook prerouting priority dstnat; policy accept;
}
}
root@firewall:/home/ycheng/nftables# nft 'add rule ip nat pre-routing iifname ens3 tcp dport { 8080 } dnat 172.16.0.2:80'
root@firewall:/home/ycheng/nftables# nft 'add rule ip nat pre-routing iifname ens3 tcp dport { 222 } dnat 172.16.0.2:22'
# change source IP (optional, use when your virtual environment has snat configured.)
root@firewall:/home/ycheng/nftables# nft add rule ip nat post-routing ip daddr 172.16.0.2 masquerade
root@firewall:/home/ycheng/nftables# nft list ruleset -a
table ip nat { # handle 8
chain post-routing { # handle 1
type nat hook postrouting priority srcnat; policy accept;
ip saddr 172.16.0.0/24 oif "ens3" snat to 192.168.122.151 # handle 26
ip daddr 172.16.0.2 masquerade # handle 29
}
chain pre-routing { # handle 14
type nat hook prerouting priority dstnat; policy accept;
iifname "ens3" tcp dport { 8080 } dnat to 172.16.0.2:80 # handle 31
iifname "ens3" tcp dport { 222 } dnat to 172.16.0.2:22 # handle 33
}
}
```
### save to file
```
15 November 2020
logical volume manager (lvm)
PV - physical volume
VG - volume group
PE - physical extent
LV - logical volume
root@develop:/home/ycheng# lvmdiskscan
/dev/vda2 [ <20.00 GiB]
/dev/vdb [ 20.00 GiB]
/dev/vdc [ 20.00 GiB]
2 disks
1 partition
0 LVM physical volume whole disks
0 LVM physical volumes
# create PV
----------------------------------------------------------------
root@develop:/home/ycheng# pvcreate /dev/vdb
Physical volume "/dev/vdb" successfully created.
root@develop:/home/ycheng# pvcreate /dev/vdc
Physical volume "/dev/vdc" successfully created.
root@develop:/home/ycheng# lvmdiskscan
/dev/vda2 [ <20.00 GiB]
/dev/vdb [ 20.00 GiB] LVM physical volume
/dev/vdc [ 20.00 GiB] LVM physical volume
0 disks
1 partition
2 LVM physical volume whole disks
0 LVM physical volumes
root@develop:/home/ycheng# pvdisplay
"/dev/vdc" is a new physical volume of "20.00 GiB"
--- NEW Physical volume ---
PV Name /dev/vdc
VG Name
PV Size 20.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID LraVsw-U2HV-e2nA-z7rc-qKtx-QVmr-YNk0dF
"/dev/vdb" is a new physical volume of "20.00 GiB"
--- NEW Physical volume ---
PV Name /dev/vdb
VG Name
PV Size 20.00 GiB
Allocatable NO
PE Size 0
Total PE 0
Free PE 0
Allocated PE 0
PV UUID o4dT1U-SkDI-lunc-2Q7n-60OJ-40TL-72zoFB
!!! remove PV
pvremove < PV Name >
# create VG
----------------------------------------------------------------
root@develop:/home/ycheng# vgcreate vg1 /dev/vdb /dev/vdc
Volume group "vg1" successfully created
root@develop:/home/ycheng# vgdisplay
--- Volume group ---
VG Name vg1
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 39.99 GiB
PE Size 4.00 MiB
Total PE 10238
Alloc PE / Size 0 / 0
Free PE / Size 10238 / 39.99 GiB
VG UUID OwIsAL-iQSB-FJb0-dGUG-O4ro-1iNK-sw7Baf
!!! create VG with PE size to 128MiB
vgcreate -s 128 vg1 /dev/vdb /dev/vdc
!!! remove VG
vgremove vg1
!!! maximum 65534 PEs.
# create LV
----------------------------------------------------------------
root@develop:/home/ycheng# lvcreate -L 8G -n lv1 vg1
Logical volume "lv1" created.
root@develop:/home/ycheng# lvcreate -l 100%FREE -n lv2 vg1
Logical volume "lv2" created.
root@develop:/home/ycheng# lvdisplay
--- Logical volume ---
LV Path /dev/vg1/lv1
LV Name lv1
VG Name vg1
LV UUID 03jsBD-3XKC-88oZ-XHwJ-uJHw-J92j-MtTGdL
LV Write Access read/write
LV Creation host, time develop, 2020-11-15 05:46:58 +0000
LV Status available
# open 0
LV Size 8.00 GiB
Current LE 2048
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:0
--- Logical volume ---
LV Path /dev/vg1/lv2
LV Name lv2
VG Name vg1
LV UUID zMhcOs-Xpib-SSyA-fmeI-6puj-k3v8-NXAsb6
LV Write Access read/write
LV Creation host, time develop, 2020-11-15 05:47:59 +0000
LV Status available
# open 0
LV Size 31.99 GiB
Current LE 8190
Segments 2
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:1
!!! create "striped" allocation type LV 8GB
root@develop:/home/ycheng# lvcreate --type striped -i 2 -L 8G -n striped_lv1 vg1
Using default stripesize 64.00 KiB.
Logical volume "striped_lv1" created.
!!! allocate 100% free extents to create new LV. "-l < extents number >"
root@develop:/home/ycheng# lvcreate --type striped -i 2 -l 100%FREE -n striped_lv2 vg1
Using default stripesize 64.00 KiB.
Logical volume "striped_lv2" created.
!!! remove LV
lvremove /dev/vg1/striped_lv2
Create snapshot and merge (recovery)
----------------------------------------------------------------
root@develop:/home/ycheng# lvcreate -s -L 8G -n snap_striped_lv1 vg1/striped_lv1
Using default stripesize 64.00 KiB.
Logical volume "snap_striped_lv1" created.
root@develop:/home/ycheng# lvconvert --merge vg1/snap_striped_lv1
Merging of volume vg1/snap_striped_lv1 started.
vg1/striped_lv1: Merged: 100.00%
!!! remove snapshot
lvremove
Resize LV
----------------------------------------------------------------
root@develop:/home/ycheng# lvscan
ACTIVE '/dev/vg1/striped_lv1' [8.00 GiB] inherit
root@develop:/home/ycheng# lvresize -L +4G vg1/striped_lv1
Using stripesize of last segment 64.00 KiB
Size of logical volume vg1/striped_lv1 changed from 8.00 GiB (2048 extents) to 12.00 GiB (3072 extents).
Logical volume vg1/striped_lv1 successfully resized.
Remove PV from VG
----------------------------------------------------------------
pvmove /dev/vdc
vgreduce vg1 /dev/vdc
pvremove /dev/vdc
Rename VG
----------------------------------------------------------------
root@develop:/home/ycheng# vgscan
Reading volume groups from cache.
Found volume group "vg1" using metadata type lvm2
root@develop:/home/ycheng# vgrename vg1 new_vg1
Volume group "vg1" successfully renamed to "new_vg1"
root@develop:/home/ycheng# vgscan
Reading volume groups from cache.
Found volume group "new_vg1" using metadata type lvm2
Other commands
----------------------------------------------------------------
vgextend - add new PV to VG
vgchange - change VG attributes
lvextend - increase LV size
lvreduce - reduce LV size
Posted by Knowledge Base 2.0 at 17:29:00 No comments:
Chrony install and configure
[ Setup Chrony NTP server ]
# install and start chrony service
apt update
apt install -y chrony
systemctl start chrony
systemctl enable chrony
root@ceph-mon-01:~# chronyc activity
200 OK
8 sources online
0 sources offline
0 sources doing burst (return to online)
0 sources doing burst (return to offline)
0 sources with unknown address
# edit config with nearest zone.
vim /etc/chrony/chrony.conf
#pool ntp.ubuntu.com iburst maxsources 4
#pool 0.ubuntu.pool.ntp.org iburst maxsources 1
#pool 1.ubuntu.pool.ntp.org iburst maxsources 1
#pool 2.ubuntu.pool.ntp.org iburst maxsources 2
pool 0.oceania.pool.ntp.org iburst maxsources 2
pool 1.oceania.pool.ntp.org iburst maxsources 2
pool 2.oceania.pool.ntp.org iburst maxsources 2
pool 3.oceania.pool.ntp.org iburst maxsources 2
# allow all client on the same local network
# change to your own IP/networks
vim /etc/chrony/chrony.conf
allow 192.168.1.0/24
systemctl restart chrony
systemctl status chrony
[ Setup NTP client with Chrony ]
apt update
apt install -y chrony
# Change default ntp pool to the Chrony NTP server
vim /etc/chrony/chrony.conf
#pool ntp.ubuntu.com iburst maxsources 4
#pool 0.ubuntu.pool.ntp.org iburst maxsources 1
#pool 1.ubuntu.pool.ntp.org iburst maxsources 1
#pool 2.ubuntu.pool.ntp.org iburst maxsources 2
server 192.168.1.1 prefer iburst
systemctl restart chrony
[ Check NTP server used by chronyd service ]
# check on both server or client.
root@ceph-mon-01:~# chronyc sources
210 Number of sources = 8
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^- time.cloudflare.com 3 6 377 47 +680us[ +680us] +/- 7331us
^- ns1.att.wlg.telesmart.co> 2 6 377 49 -2532us[-2532us] +/- 33ms
^- ns1.tdc.akl.telesmart.co> 2 6 377 47 +2926us[+2926us] +/- 20ms
^- ntp2.ds.network 4 6 377 46 +443us[ +443us] +/- 30ms
^* ntp.seby.io 2 6 377 51 -148us[ -198us] +/- 1896us
^- ns2.tdc.akl.telesmart.co> 2 6 377 46 -3574us[-3574us] +/- 23ms
^- ntp03.lagoon.nc 2 6 377 48 -498us[ -498us] +/- 75ms
^- warrane.connect.com.au 3 6 377 47 +750us[ +750us] +/- 129ms
root@ceph-osd-02:/home/ycheng# chronyc sources
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^* 192.168.1.1 3 6 17 19 -7870ns[ -13us] +/- 1959us
root@ceph-osd-02:/home/ycheng# chronyc sources -v
210 Number of sources = 1
.-- Source mode '^' = server, '=' = peer, '#' = local clock.
/ .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| / '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
|| .- xxxx [ yyyy ] +/- zzzz
|| Reachability register (octal) -. | xxxx = adjusted offset,
|| Log2(Polling interval) --. | | yyyy = measured offset,
|| \ | | zzzz = estimated error.
|| | | \
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^* 192.168.1.1 3 6 17 44 -7870ns[ -13us] +/- 1959us
root@ceph-mon-01:~# chronyc clients
Hostname NTP Drop Int IntL Last Cmd Drop Int Last
===============================================================================
ceph-osd-02 6 0 5 - 62 0 0 - -
Posted by Knowledge Base 2.0 at 11:03:00 No comments:
Labels: chrony, ntp
basic ss command usage (socket statistics)
# list tcp/udp/unix established and connected connections
# use -e to print extended info
ss -t
ss -te
# also list listening
ss -at
# list udp
ss -ua
# no resolve hostname
ss -nt
# only listening
ss -ltn
ss -lun
# print process name and pid
ss -ltp
# print summary statistics
ss -s
# print timer
ss -tno
# ipv4 or 6
ss -lt -f inet
ss -lt4
ss -lt6
# filter tcp connection state
ss -t4 state [established | syn-sent | syn-recv | time-wait | closed | close-wait | connected | ... ]
# filter address and port
ss -t4 state established dport = :22
ss -at '( dport = :22 or sport = :22 )'
ss -at '( dst :443 or dst :80 )'
ss -at dst :443 or dst :80
ss -at dst 192.168.1.1
ss -at dst 192.168.1.0/24
ss -at dst 192.168.1.1:22
ss -at src 192.168.1.1 sport gt :3000
compare operator: lt , le , eq , gt , ge , ne
compare operator: < , <= , == , > , >= , !=
Posted by Knowledge Base 2.0 at 00:13:00 No comments:
Labels: ss
31 October 2020
Some bash command pipelines usage
list address/link/route in all network namespaces
ip netns | cut -d' ' -f1 | xargs -L1 -I {} sh -c "echo {}; ip netns exec {} ip -br -4 a | sed 's/^/ /g'; echo"
ip netns | cut -d' ' -f1 | xargs -L1 -I {} sh -c "echo {}; ip netns exec {} ip -br -4 a | sed 's/^/ /g' | awk '{print \$1,\$3}'; echo"
ip netns | cut -d' ' -f1 | xargs -L1 -I {} sh -c "echo {}; ip netns exec {} ip -br l | sed 's/^/ /g'; echo"
ip netns | cut -d' ' -f1 | xargs -L1 -I {} sh -c "echo {}; ip netns exec {} ip -br l | awk '{print \$1,\$3}' | sed 's/^/ /g'; echo"
ip netns | cut -d' ' -f1 | xargs -L1 -I {} sh -c "echo {}; ip netns exec {} ip r | sed 's/^/ /g'; echo"
linux bridge
brctl show | tail -n +2 | sed -e 's/^\t\t\t*/,/g' -e 's/\t/ /g' | awk '{print $1,$4}' | sed -z -e 's/\n,/,/g' -e 's/ ,/,/g'
Virsh
virsh dumpxml {instance_name} | grep "dev='vnet\|dev='tap" | cut -d"'" -f2
virsh dumpxml {instance_name} | grep -B2 "dev='vnet\|dev='tap" | grep "mac address\|target dev" | cut -d"'" -f2 | sed 'N;s/\n/ /'
virsh dumpxml {instance_name} | grep -B3 -A4 "dev='vnet\|dev='tap" | grep "mac address\|target dev\|alias name" | cut -d"'" -f2 | sed 'N;N;s/\n/ /g'
root@nuc:/home/ycheng# virsh list | grep "running" | awk '{print $2}' | xargs -L1 -I {} sh -c "echo {}; virsh dumpxml {} | grep \"dev='vnet\|dev='tap\" | cut -d\"'\" -f2 | sed 's/^/ /g'"
firewall
vnet0
vnet1
firewall_2
vnet2
root@nuc:/home/ycheng# virsh list | grep "running" | awk '{print $2}' | xargs -L1 -I {} sh -c "echo {}; virsh dumpxml {} | grep -B2 \"dev='vnet\|dev='tap\" | grep \"mac address\|target dev\" | cut -d\"'\" -f2 | sed 'N;s/\n/ /' | sed 's/^/ /g'"
firewall
52:54:00:85:66:da vnet0
52:54:00:38:46:8b vnet1
firewall_2
52:54:00:c2:d7:ef vnet2
Posted by Knowledge Base 2.0 at 23:54:00 No comments:
18 October 2020
ip command usage
add alias in ~/.bashrc or ~/.bash_aliases
alias ip4-a="ip -4 a"
alias ip4-ba="ip -4 -br a"
alias ip4-bl="ip -br l"
alias ip4-sa="ip -4 -s a"
alias ip4-sl="ip -s l"
alias ip4-as="ip -4 a show"
alias ip4-ls="ip l show"
List interfaces and IPs
ip addr
ip -4 -brief addr
ip -4 -color addr
ip -4 -json addr
ip -4 -details addr
ip -4 -stats addr
ip -f link -br addr
ip -0 -br a
ip -br link
ip -o link
ip -4 -o addr
Add and delete IPs, set link
ip addr add 192.168.0.1/24 dev eth0
ip addr del 192.168.0.1/24 dev eth0
ip link set eth0 [ up | down ]
ip link set eth0 mtu 9000
ip link set eth0 promisc [ on | off ]
Route
ip route
ip -d route
ip -4 route
ip route show default
ip route show 192.168.0.0/24
ip route add default via 192.168.0.254
ip route del default via 192.168.0.254
ip route add 172.16.0.0/24 via 192.168.0.100 dev eth0
ip route del 172.16.0.0/24
ip route replace 172.16.0.0/24 dev eth1
# check which route is used for a destination IP
ip route get 172.16.0.1
# for persistence static routes in Ubuntu, add line in /etc/network/interfaces
up ip route add 172.16.0.0/24 via 192.168.0.100 dev eth0
Neighbor cache
ip neigh add 192.168.0.100 lladdr aa:bb:cc:11:22:33 dev eth0
ip neigh del 192.168.0.100 dev eth0
ip neigh replace 192.168.0.100 lladdr aa:bb:cc:44:55:66 dev eth0
Network Namespace
ip netns
ip netns [ add | del ] ns1
ip netns exec ns1 ip addr
Virtual Ethernet Device (veth)
ip link add veth1 type veth peer name veth2
ip link set veth1 netns ns1
# create veth in specific network namespaces
ip link add veth1 netns ns1 type veth peer name veth2 netns ns2
# check veth peer interface ID
ethtool -S veth1
# delete veth pair
ip link del veth1
Monitor
ip monitor
ip -t monitor
ip -ts monitor
GRE Tunnel
ip tunnel show
# Create gre tunnel interface
ip tunnel add tunnel0 mode gre remote {Remote Peer IP} local {Local Peer IP} ttl 255
ip link set tunnel0 up
# assign IP on tunnel port (optional)
ip addr add {IP Address/Mask} dev tunnel0
# add route
ip route add {Destination Network Address/Mask} dev tunnel0
ip tunnel del tunnel0
Virtual routing and Forwarding (VRF)
ip link show type vrf
ip -br link show type vrf
ip neigh show vrf {VRF Name}
ip addr show vrf {VRF Name}
ip route show vrf {VRF Name}
ip route show {Table ID}
ip route get vrf {VRF Name} {Network Address}
ip link add {VRF Name} type vrf table {Table ID}
# ex. ip link add vrf1 type vrf table 1
ip link set dev {DEV Name} nomaster
Ref:
http://www.routereflector.com/2016/11/working-with-vrf-on-linux/
https://www.kernel.org/doc/Documentation/networking/vrf.txt
Posted by Knowledge Base 2.0 at 02:47:00 No comments:
Labels: ip
6 October 2020
nftables
+-----------------+
| |
| Gateway |
| |
+-----------------+ +--------------------+ +--------------------+
|192.168.122.1 | | | |
| | Firewall | | Server |
+-----------------+ 222 +---------------+ ssh 22 |
| 8080 | | web 80 |
+--------------------+ +--------------------+
192.168.122.151 172.16.0.1 172.16.0.2
List Table, Chain, Rules
root@firewall:/home/ycheng/nftables# nft list tables
table ip filter
root@firewall:/home/ycheng/nftables# nft list chains
table ip filter {
chain input {
type filter hook input priority filter; policy accept;
}
}
root@firewall:/home/ycheng/nftables# nft list ruleset
table ip filter {
chain input {
type filter hook input priority filter; policy accept;
tcp dport 22 accept
}
}
root@firewall:/home/ycheng/nftables# nft list chains -a
table ip filter {
chain input { # handle 1
type filter hook input priority filter; policy accept;
}
}
root@firewall:/home/ycheng/nftables# nft list ruleset -a
table ip filter { # handle 7
chain input { # handle 1
type filter hook input priority filter; policy accept;
tcp dport 22 accept # handle 2
}
}
Source NAT
root@firewall:/home/ycheng/nftables# echo 1 > /proc/sys/net/ipv4/ip_forward
root@firewall:/home/ycheng/nftables# ip addr add 172.16.0.1/24 dev ens8
root@firewall:/home/ycheng/nftables# ip link set dev ens8 up
root@firewall:/home/ycheng/nftables# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: ens3: mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:85:66:da brd ff:ff:ff:ff:ff:ff
inet 192.168.122.151/24 brd 192.168.122.255 scope global dynamic ens3
valid_lft 2412sec preferred_lft 2412sec
3: ens8: mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 52:54:00:38:46:8b brd ff:ff:ff:ff:ff:ff
inet 172.16.0.1/24 scope global ens8
valid_lft forever preferred_lft forever
root@firewall:/home/ycheng/nftables# nft add table nat
root@firewall:/home/ycheng/nftables# nft 'add chain nat post-routing { type nat hook postrouting priority 100; policy accept; }'
root@firewall:/home/ycheng/nftables# nft add rule nat post-routing ip saddr 172.16.0.0/24 oif ens3 snat 192.168.122.151
root@firewall:/home/ycheng/nftables# nft list ruleset
table ip nat {
chain post-routing {
type nat hook postrouting priority srcnat; policy accept;
ip saddr 172.16.0.0/24 oif "ens3" snat to 192.168.122.151
}
}
Destination NAT
# Set default policy to accept in this pre-routing chain if you have source NAT in use.
root@firewall:/home/ycheng/nftables# nft 'add chain nat pre-routing { type nat hook prerouting priority -100; policy accept; }'
root@firewall:/home/ycheng/nftables# nft list chains
table ip nat {
chain post-routing {
type nat hook postrouting priority srcnat; policy accept;
}
chain pre-routing {
type nat hook prerouting priority dstnat; policy accept;
}
}
root@firewall:/home/ycheng/nftables# nft 'add rule ip nat pre-routing iifname ens3 tcp dport { 8080 } dnat 172.16.0.2:80'
root@firewall:/home/ycheng/nftables# nft 'add rule ip nat pre-routing iifname ens3 tcp dport { 222 } dnat 172.16.0.2:22'
# change source IP (optional, use when your virtual environment has snat configured.)
root@firewall:/home/ycheng/nftables# nft add rule ip nat post-routing ip daddr 172.16.0.2 masquerade
root@firewall:/home/ycheng/nftables# nft list ruleset -a
table ip nat { # handle 8
chain post-routing { # handle 1
type nat hook postrouting priority srcnat; policy accept;
ip saddr 172.16.0.0/24 oif "ens3" snat to 192.168.122.151 # handle 26
ip daddr 172.16.0.2 masquerade # handle 29
}
chain pre-routing { # handle 14
type nat hook prerouting priority dstnat; policy accept;
iifname "ens3" tcp dport { 8080 } dnat to 172.16.0.2:80 # handle 31
iifname "ens3" tcp dport { 222 } dnat to 172.16.0.2:22 # handle 33
}
}
Save to file
root@firewall:/home/ycheng/nftables# nft list ruleset > ./default.nft
# load from file
root@firewall:/home/ycheng/nftables# nft -f ./default.nft
# Edit the file with "flush ruleset".
#!/usr/sbin/nft -f
flush ruleset
table ip nat { # handle 8
chain post-routing { # handle 1
type nat hook postrouting priority srcnat; policy drop;
ip saddr 172.16.0.0/24 oif "ens3" snat to 192.168.122.151 # handle 26
ip daddr 172.16.0.2 masquerade # handle 29
}
chain pre-routing { # handle 14
type nat hook prerouting priority dstnat; policy accept;
iifname "ens3" tcp dport { 8080 } dnat to 172.16.0.2:80 # handle 31
iifname "ens3" tcp dport { 222 } dnat to 172.16.0.2:22 # handle 33
}
}
nft list ruleset
nft flush ruleset
nft delete table
nft -f /path/to/nftables.conf
nft add table inet TABLE_NAME
# "inet" includes ip and ip6
nft add table inet filter
nft 'add chain inet filter input { type filter hook input priority 0 ; counter ; policy drop ; }'
# priority 0 is the default; a lower (more negative) priority means the chain is evaluated earlier in the hook.
# each hook type has its own standard priority values.
nft 'add chain inet filter forward { type filter hook forward priority 0 ; counter ; policy drop ; }'
nft 'add rule inet filter forward ct state established,related accept'
nft 'add rule inet filter forward ct state invalid drop'
nft 'add chain inet filter output { type filter hook output priority 0 ; counter ; policy drop ; }'
```