
Privex dual RPC setup (full node + separated account history)

© 2020 Privex Inc. - https://www.privex.io/

If this guide helped you, vote for @someguy123 on Hive / Steem, and maybe buy a server from https://www.privex.io/

specs

  • min 1 TB (2-3 TB recommended) of high-speed storage (NVMe preferred!)
    • this guide uses LVM for flexible raid0 (we've found it's faster than mdadm)
    • more NVMes = faster; we recommend 3 to 8 smaller NVMes rather than 1 or 2 big ones
  • 64GB RAM (128GB recommended)
  • a CPU with good single-threaded performance
  • Ubuntu 18.04 Bionic

Partition disks


# !!! WARNING: Will delete any existing partitions!
# Create a Linux partition filling the disk for each drive
echo ",,L" | sfdisk -X gpt /dev/nvme0n1
echo ",,L" | sfdisk -X gpt /dev/nvme1n1
echo ",,L" | sfdisk -X gpt /dev/nvme2n1

install thin provisioning tools

apt install -y thin-provisioning-tools

setup lvm

# Setup LVM on NVMe's 0, 1 and 2
pvcreate /dev/nvme{0..2}n1p1

vgcreate nvraid /dev/nvme0n1p1 /dev/nvme1n1p1 /dev/nvme2n1p1

# Change the '3' in -i3 to however many disks/partitions you're RAID0'ing
# -I64 sets a 64KiB stripe size, matching su=65536 in the mkfs.xfs commands below
lvcreate --type raid0 -i3 -I64 -L 1500G -n thin64 nvraid

# Convert the striped LV into a thin pool, then give the pool extra metadata space
lvconvert --thinpool nvraid/thin64

lvextend --poolmetadatasize +1G nvraid/thin64

# Create volume for storing the block_log
# We'll download the block_log onto it, then snapshot it into hive1 + hive2
# This means both RPCs will only consume as much space as ONE block_log
lvcreate -V 500G --thinpool nvraid/thin64 -n blockshive

# Create two volumes for shared_memory/rocksdb
lvcreate -V 200G --thinpool nvraid/thin64 -n hiveshm1
lvcreate -V 100G --thinpool nvraid/thin64 -n hiveshm2
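
Before formatting anything, a quick look at the LVM layout should show three PVs in the nvraid VG, the thin64 pool, and the three thin volumes we just created:

# PVs, the VG, and all LVs (including the thin pool and its volumes)
pvs
vgs nvraid
lvs -a nvraid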

format shm vols + blockshive and mount blockshive

# Format the two volumes with XFS. Change the '3' in sw=3 to the number of drives raid'ed
mkfs.xfs -f -s size=4096 -b size=4096 -d su=65536,sw=3 -i maxpct=3 /dev/nvraid/hiveshm1
mkfs.xfs -f -s size=4096 -b size=4096 -d su=65536,sw=3 -i maxpct=3 /dev/nvraid/hiveshm2


# Format blockshive with XFS. Change the '3' in sw=3 to the number of drives raid'ed
mkfs.xfs -f -s size=4096 -b size=4096 -d su=65536,sw=3 -i maxpct=3 /dev/nvraid/blockshive

mkdir /mnt/blockshive
mount /dev/nvraid/blockshive /mnt/blockshive
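
If you want to confirm XFS picked up the stripe geometry, xfs_info on the mounted filesystem reports it. With 4KiB blocks, su=65536,sw=3 should show up as sunit=16, swidth=48 blocks:

# Check stripe unit/width on the mounted volume
xfs_info /mnt/blockshive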

create blockchain folder + download block log

cd /mnt/blockshive
mkdir -p witness_node_data_dir/blockchain

rsync -avh --progress --append rsync://files.privex.io/hive/block_log witness_node_data_dir/blockchain/block_log
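
The --append flag means an interrupted transfer can be resumed by simply re-running the same rsync command. To compare your local copy against the server before replaying (assuming the Privex rsync module stays at this path):

# Size of the remote block_log, as reported by the rsync server
rsync --list-only rsync://files.privex.io/hive/block_log

# Size of the local copy, for comparison
ls -lh witness_node_data_dir/blockchain/block_log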

snapshot blockshive + clone into hive1 + hive2 data vols

# once downloaded, snapshot blockshive with a timestamp

umount /mnt/blockshive
lvcreate -s -n blockshive_28mar2020 nvraid/blockshive

# remove original blockshive (won't hurt the snapshot)
lvremove nvraid/blockshive

# clone blockshive snapshot into hive1 and hive2
lvcreate -s -n hive1 nvraid/blockshive_28mar2020
lvcreate -s -n hive2 nvraid/blockshive_28mar2020

# Activate the snapshots so we can mount them
lvchange -v -K -a y nvraid/blockshive_28mar2020
lvchange -v -K -a y nvraid/hive1
lvchange -v -K -a y nvraid/hive2

# Zero the XFS log (xfs_repair -L) and regenerate each UUID (xfs_admin -U),
# otherwise Linux thinks they're all the same volume

xfs_repair -L /dev/mapper/nvraid-blockshive_28mar2020
xfs_repair -L /dev/mapper/nvraid-hive1
xfs_repair -L /dev/mapper/nvraid-hive2
xfs_admin -U $(uuidgen) /dev/mapper/nvraid-blockshive_28mar2020
xfs_admin -U $(uuidgen) /dev/mapper/nvraid-hive1
xfs_admin -U $(uuidgen) /dev/mapper/nvraid-hive2
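
Since hive1 and hive2 are thin snapshots, their blocks are shared with blockshive_28mar2020 until a node writes to them, so the pair initially costs almost no extra space. You can watch real consumption per volume and for the pool as a whole:

# Data% per thin volume, plus overall pool usage
lvs -a nvraid
lvs -o name,data_percent,metadata_percent nvraid/thin64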

create mountpoints and setup fstab

# Create mountpoints for hive data + hive shm
mkdir -p /hive/rpc{1..2}
mkdir -p /shmhive/rpc{1..2}

# Open up fstab and add the following entries
nano /etc/fstab
### BEGIN /etc/fstab entries
/dev/mapper/nvraid-hiveshm1                 /shmhive/rpc1   xfs     defaults    0 0
/dev/mapper/nvraid-hiveshm2                 /shmhive/rpc2   xfs     defaults    0 0

/dev/mapper/nvraid-hive1                    /hive/rpc1      xfs     defaults    0 0
/dev/mapper/nvraid-hive2                    /hive/rpc2      xfs     defaults    0 0
### END /etc/fstab entries
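
Before mounting, it doesn't hurt to validate the new entries (findmnt --verify is available on Ubuntu 18.04's util-linux):

# Checks fstab syntax and that each source device/filesystem exists
findmnt --verify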

mount volumes, clone hive-rpcs-docker, and copy configs

# now mount all of them

mount -av

cd ~


git clone https://github.com/Privex/hive-rpcs-docker.git rpc

cd rpc

git clone https://github.com/Someguy123/steem-docker.git acchist
git clone https://github.com/Someguy123/steem-docker.git core

cp core.env core/.env
cp acchist.env acchist/.env

cp rpc1_config.ini /hive/rpc1/witness_node_data_dir/config.ini
cp rpc2_config.ini /hive/rpc2/witness_node_data_dir/config.ini
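
The two config files are what actually split the roles between the full node and the account history node. To confirm each node keeps its state on the right volume, a quick grep helps (this assumes the configs point hived's shared-file-dir at the /shmhive mounts; adjust the pattern to however your configs are laid out):

# Each node's shared memory dir and plugin list at a glance
grep -E 'shared-file|plugin' /hive/rpc1/witness_node_data_dir/config.ini
grep -E 'shared-file|plugin' /hive/rpc2/witness_node_data_dir/config.ini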

install docker if needed, then build images + replay

cd acchist/

./run.sh install_docker

STEEM_SOURCE="https://github.com/openhive-network/hive.git" ./run.sh build 0.23.0 tag hive-lowmem-nomira 'ENABLE_MIRA=OFF'

./run.sh replay

cd ../core

STEEM_SOURCE="https://github.com/openhive-network/hive.git" ./run.sh build_full 0.23.0 tag hive-fullmem-mira 'ENABLE_MIRA=ON'

./run.sh replay
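
Both replays take many hours. You can follow progress with steem-docker's logs command, which tails the container output:

# Watch replay progress (Ctrl-C stops following; the replay keeps running)
cd ~/rpc/acchist && ./run.sh logs
cd ~/rpc/core && ./run.sh logs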


once acchist is replayed:


cd ~/rpc/acchist
./run.sh stop

cd ~/rpc

docker-compose up -d rpc-acchist nginx-acchist jussi redis1 redis2

once core is replayed:

cd ~/rpc/core
./run.sh stop

cd ~/rpc

docker-compose up -d rpc-core nginx-core
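
With both nodes and jussi running, a quick JSON-RPC call confirms the stack responds end-to-end (this assumes docker-compose publishes jussi on host port 8080, which is what the nginx upstream below expects):

# Should return the current dynamic global properties, including head_block_number
curl -s -d '{"jsonrpc":"2.0","method":"condenser_api.get_dynamic_global_properties","params":[],"id":1}' http://127.0.0.1:8080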

install nginx on host + create nginx config to point to jussi

apt install -y nginx

nano /etc/nginx/snippets/rpc.conf
nano /etc/nginx/sites-enabled/hived

rpc.conf snippet:

access_log off;

proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_connect_timeout 10;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

keepalive_timeout 65;
keepalive_requests 100000;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
proxy_ssl_verify off;

nginx config:

upstream hivesrvs {
    # Dirty hack: listing the same server 4 times makes nginx retry jussi
    # (via proxy_next_upstream) before returning an error to the client
    server 127.0.0.1:8080;
    server 127.0.0.1:8080;
    server 127.0.0.1:8080;
    server 127.0.0.1:8080;
    keepalive 10;
}

server {
    server_name direct.hived.privex.io hived.privex.io;
    root /var/www/html/;

    location ~ ^(/|/ws) {
        proxy_pass http://hivesrvs;
        proxy_set_header Connection "";
        include snippets/rpc.conf;
    }

    listen 80;
    listen [::]:80;

#    listen 443 http2 ssl;
#    listen [::]:443 http2 ssl;

}
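
After saving both files, check the config and reload nginx, then repeat the earlier JSON-RPC test through nginx itself (substitute your own server_name for the Host header):

# Validate and reload, then test end-to-end: nginx -> jussi -> hived
nginx -t && systemctl reload nginx

curl -s -H 'Host: hived.privex.io' -d '{"jsonrpc":"2.0","method":"condenser_api.get_dynamic_global_properties","params":[],"id":1}' http://127.0.0.1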