YAMBO TUTORIAL: files and scripts
===
> [name=Fulvio] Here we collect, for reference, the various files and scripts used in the tutorials.
[TOC]
### GW input file for the parallel section
<details>
```bash=
#   ____  ____     _       ____    ____  ______      ___
#  |_  _||_  _|   / \     |_   \  /   _||_   _ \   .'   `.
#    \ \  / /    / _ \      |   \/   |    | |_) | /  .-.  \
#     \ \/ /    / ___ \     | |\  /| |    |  __'. | |   | |
#     _|  |_  _/ /   \ \_  _| |_\/_| |_  _| |__) |\  `-'  /
#    |______||____| |____||_____||_____||_______/  `.___.'
#
#
#
# Version 5.1.0 Revision 21422 Hash (prev commit) fde6e2a07
# Branch is
# MPI+OpenMP+SLK+HDF5_MPI_IO Build
# http://www.yambo-code.org
#
rim_cut # [R] Coulomb potential
gw0 # [R] GW approximation
ppa # [R][Xp] Plasmon Pole Approximation for the Screened Interaction
dyson # [R] Dyson Equation solver
HF_and_locXC # [R] Hartree-Fock
em1d # [R][X] Dynamically Screened Interaction
FFTGvecs = 20 Ry # [FFT] Plane-waves
X_Threads=0 # [OPENMP/X] Number of threads for response functions
DIP_Threads=0 # [OPENMP/X] Number of threads for dipoles
SE_Threads=0 # [OPENMP/GW] Number of threads for self-energy
RandQpts=1000000 # [RIM] Number of random q-points in the BZ
RandGvec= 100 RL # [RIM] Coulomb interaction RS components
CUTGeo= "slab z" # [CUT] Coulomb Cutoff geometry: box/cylinder/sphere/ws/slab X/Y/Z/XY..
% CUTBox
0.000000 | 0.000000 | 0.000000 | # [CUT] [au] Box sides
%
CUTRadius= 0.000000 # [CUT] [au] Sphere/Cylinder radius
CUTCylLen= 0.000000 # [CUT] [au] Cylinder length
CUTwsGvec= 0.700000 # [CUT] WS cutoff: number of G to be modified
EXXRLvcs= 2 Ry # [XX] Exchange RL components
VXCRLvcs= 2 Ry # [XC] XCpotential RL components
Chimod= "HARTREE" # [X] IP/Hartree/ALDA/LRC/PF/BSfxc
% BndsRnXp
1 | 300 | # [Xp] Polarization function bands
%
NGsBlkXp= 1 Ry # [Xp] Response block size
% LongDrXp
1.000000 | 1.000000 | 1.000000 | # [Xp] [cc] Electric Field
%
PPAPntXp= 27.21138 eV # [Xp] PPA imaginary energy
XTermKind= "none" # [X] X terminator ("none","BG" Bruneval-Gonze)
% GbndRnge
1 | 300 | # [GW] G[W] bands range
%
GTermKind= "none" # [GW] GW terminator ("none","BG" Bruneval-Gonze,"BRS" Berger-Reining-Sottile)
DysSolver= "n" # [GW] Dyson Equation solver ("n","s","g")
%QPkrange # [GW] QP generalized Kpoint/Band indices
1|19|23|30|
%
```
</details>
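Once the run has finished (see the submission scripts below), the quasiparticle energies are written to `o-<jobname>.qp` in the report directory, i.e. `GW_reports/o-GW_dbs.qp` with the flags used here. A quick way to inspect them from the shell, assuming the usual column layout of the `.qp` output (K-point, Band, Eo, E-Eo, ...):
```bash=
# Skip comment lines and list the computed QP states
grep -v '#' GW_reports/o-GW_dbs.qp

# QP energy of the first listed state: E = Eo + (E-Eo)
grep -v '#' GW_reports/o-GW_dbs.qp | head -n 1 | awk '{print $3+$4}'
```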
### Slurm submission scripts on VEGA
#### Running on CPU:
<details>
```bash=
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=16
#SBATCH --cpus-per-task=1
#SBATCH --partition=cpu
#SBATCH --time=0:30:00
#SBATCH --account=d2021-135-users
#SBATCH --mem-per-cpu=2000MB
#SBATCH --job-name=mos2
#SBATCH --reservation=maxcpu
# load yambo and dependencies
module purge
module use /ceph/hpc/data/d2021-135-users/modules
module load YAMBO/5.1.1-FOSS-2022a
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
# run job
srun --mpi=pmix -n ${SLURM_NTASKS} yambo -F gw.in -J GW_dbs -C GW_reports
```
</details>
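To use this script, save it to a file (here hypothetically `job_cpu.sh`) and submit it with the standard Slurm commands:
```bash=
sbatch job_cpu.sh    # submit the job; Slurm prints the job ID
squeue -u $USER      # check the state of your jobs in the queue
scancel <jobid>      # cancel the job if needed
```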
#### Running on GPU:
<details>
```bash=
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-socket=2
#SBATCH --cpus-per-task=64
#SBATCH --gres=gpu:4
#SBATCH --partition=gpu
#SBATCH --time=0:30:00
#SBATCH --account=d2021-135-users
#SBATCH --mem=230000MB
#SBATCH --job-name=mos2-test
#SBATCH --exclusive
#SBATCH --reservation=maxgpu
# load yambo and dependencies
module purge
module use /ceph/hpc/data/d2021-135-users/modules
module load YAMBO/5.1.1-OMPI-4.0.5-NVHPC-21.2-CUDA-11.2.1
export OMP_NUM_THREADS=8
srun --mpi=pmix -n ${SLURM_NTASKS} yambo -F gw.in -J GW_dbs -C GW_reports
```
</details>
#### Slurm submission for convergence tutorial
<details>
```bash=
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=16
#SBATCH --cpus-per-task=1
#SBATCH --partition=cpu
#SBATCH --time=0:30:00
#SBATCH --account=d2021-135-users
#SBATCH --mem-per-cpu=2000MB
#SBATCH --job-name=mos2
# load yambo and dependencies
module purge
module use /ceph/hpc/data/d2021-135-users/modules
module load YAMBO/5.1.1-FOSS-2022a
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
file0='i01-GW'
for POL_BANDS in 20 40 60 80; do
    echo 'NGsBlkXp [Ry]  E_vale [eV]  E_cond [eV]  Gap [eV]' > summary_01_${POL_BANDS}bands.txt
    for NGsBlkXp_Ry in 6 8 10 12; do
        label=Xp_${POL_BANDS}_bands_${NGsBlkXp_Ry}_Ry
        jdir=job_${label}
        cdir=out_${label}
        filein=i01-GW_${label}
        # generate the input for this (bands, block size) pair from the template
        sed "s/NGsBlkXp=.*/NGsBlkXp=${NGsBlkXp_Ry} Ry/;
             /% BndsRnXp/{n;s/.*/ 1 | ${POL_BANDS} |/}" $file0 > $filein
        # run yambo
        srun --mpi=pmix -n ${SLURM_NTASKS} yambo -F $filein -J $jdir -C $cdir
        # QP energies E = Eo + (E-Eo) of the first (valence) and last (conduction) states
        E_GW_v=`grep -v '#' ${cdir}/o-${jdir}.qp | head -n 1 | awk '{print $3+$4}'`
        E_GW_c=`grep -v '#' ${cdir}/o-${jdir}.qp | tail -n 1 | awk '{print $3+$4}'`
        GAP_GW=`echo $E_GW_c - $E_GW_v | bc`
        echo ${NGsBlkXp_Ry} '    ' ${E_GW_v} '    ' ${E_GW_c} '    ' ${GAP_GW} >> summary_01_${POL_BANDS}bands.txt
    done
done
```
</details>
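The `sed` command is what generates one input per parameter pair from the `i01-GW` template: the first expression overwrites the `NGsBlkXp` value, while in the second the `n` command advances to the line following `% BndsRnXp`, so that the substitution rewrites the band range itself. As a standalone illustration with fixed values:
```bash=
# Produce a single input with 40 polarization bands and an 8 Ry block size
sed "s/NGsBlkXp=.*/NGsBlkXp=8 Ry/;
     /% BndsRnXp/{n;s/.*/ 1 | 40 |/}" i01-GW > i01-GW_Xp_40_bands_8_Ry
```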
#### Slurm submission for parallel tutorial
<details>
```bash=
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=16
#SBATCH --cpus-per-task=1
#SBATCH --partition=cpu
#SBATCH --time=1:00:00
#SBATCH --account=d2021-135-users
#SBATCH --mem-per-cpu=2000MB
#SBATCH --job-name=mos2
##SBATCH --reservation=maxcpu
# load yambo and dependencies
module purge
module use /ceph/hpc/data/d2021-135-users/modules
module load YAMBO/5.1.1-FOSS-2022a
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
# info
ncpu=${SLURM_NTASKS}
nthreads=${OMP_NUM_THREADS}
label=MPI${ncpu}_OMP${nthreads}
jdir=run_${label}
cdir=run_${label}.out
# Update input file
filein0=gw.in # Original file
filein=gw_${label}.in # New file
cp -f $filein0 $filein
cat >> $filein << EOF
DIP_CPU= "1 $ncpu 1" # [PARALLEL] CPUs for each role
DIP_ROLEs= "k c v" # [PARALLEL] CPUs roles (k,c,v)
DIP_Threads= 0 # [OPENMP/X] Number of threads for dipoles
X_and_IO_CPU= "1 1 1 $ncpu 1" # [PARALLEL] CPUs for each role
X_and_IO_ROLEs= "q g k c v" # [PARALLEL] CPUs roles (q,g,k,c,v)
X_and_IO_nCPU_LinAlg_INV=1 # [PARALLEL] CPUs for Linear Algebra (if -1 it is automatically set)
X_Threads= 0 # [OPENMP/X] Number of threads for response functions
SE_CPU= "1 $ncpu 1" # [PARALLEL] CPUs for each role
SE_ROLEs= "q qp b" # [PARALLEL] CPUs roles (q,qp,b)
SE_Threads= 0 # [OPENMP/GW] Number of threads for self-energy
EOF
# run yambo
srun --mpi=pmix -n $ncpu yambo -F $filein -J $jdir -C $cdir
```
</details>
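This script runs a single MPI/OpenMP combination, fixed by the `#SBATCH` directives. Since command-line options passed to `sbatch` override the directives inside the script, a scaling scan over MPI task counts can be launched without editing the file (the script name is a placeholder):
```bash=
for n in 1 2 4 8 16; do
    sbatch --ntasks-per-node=$n job_parallel.sh
done
```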
### Python scripts
#### Convergence plot 1
<details>
```python=
import numpy as np
import matplotlib.pyplot as plt
list_of_bands = [20,40,60,80]
list_of_data = []
for bands in list_of_bands:
    data = np.loadtxt(f'summary_01_{bands}bands.txt',comments='NGs')
    list_of_data.append(data)
for bands,data in zip(list_of_bands,list_of_data):
    X = data[:,0]
    Y = data[:,2]-data[:,1]
    plt.plot(X,Y,marker='.',label=f'{bands}_bands')
plt.ylabel('Direct Gap [eV]')
plt.xlabel('NGsBlkXp [Ry]')
plt.legend()
plt.savefig('fig-01.png')
```
</details>
#### Convergence plot 2
<details>
```python=
import numpy as np
import matplotlib.pyplot as plt
list_of_files = ['summary_02_noBG.txt']
#list_of_files = ['summary_02_noBG.txt','summary_03_BG.txt']
list_of_data = []
for _file in list_of_files:
    data = np.loadtxt(_file,comments='G b')
    list_of_data.append(data)
for _file,data in zip(list_of_files,list_of_data):
    X = data[:,0]
    Y = data[:,2]-data[:,1]
    _label = _file.split('.')[0].split('_')[2]
    plt.plot(X,Y,marker='.',label=_label)
plt.ylabel('Direct Gap [eV]')
plt.xlabel('G bands')
plt.legend()
plt.savefig('fig-02.png')
```
</details>
#### parse_ytimings.py
<details>
```python=
"""
Script used to parse the timings at the end of
Yambo reports file.
Usage: parse_ytimings.py head_calc_dir
head_calc_dir = first characters of the names of directories containing reports
Output:
- scaling plots
"""
from glob import glob
import sys
def parse_timing_data(separator1=" : ",separator2 ="s P",separator3 = "s C",separator4 = "m-"):
"""
Read timing sections of a list of report files
"""
# Get report files
n_runs = len(glob(sys.argv[1]+'*.out'))
reports = glob(sys.argv[1]+'*/r-*')
n_reports = len(reports)
if n_reports != n_runs:
raise ValueError("[ERR] report (%d) and run (%d) numbers are different"%(n_reports,n_runs))
# Produce a dictionary for each output
# Key: timing section name, Value: timing in seconds
time_dicts = []
n_tasks = []
for report in reports:
with open(report) as r:
lines = r.readlines()
# Find Timing Overview in the report
for il,line in enumerate(lines):
if "Threads total" in line: n_tasks.append(int(line.split(":")[1].strip()))
if "Timing Overview" in line: l0 = il
if "Memory Overview" in line: l1 = il
lines=lines[l0:l1]
time_dict = {}
for line in lines:
if separator1 in line:
aux = line.split(separator1)
key = aux[0].strip()
# Use separator2 for parallel runs, separator3 for serial
if separator2 in aux[1]: value = aux[1].split(separator2)[0].strip()
if separator3 in aux[1]: value = aux[1].split(separator3)[0].strip()
# Convert in seconds if minutes format found
if separator4 in value: value = float(value.split(separator4)[0])*60.+\
float(value.split(separator4)[1])
else: value = float(value)
time_dict[key]=value
time_dicts.append(time_dict)
# Last check
n_dicts = len(time_dicts)
if n_dicts!=n_reports:
raise ValueError("[ERR] Parsing problem (number of dicts. %d different from number of reports %d)"%(n_dicts,n_reports))
return n_tasks,time_dicts
def get_global_time(separator1="m-"):
"""
Read global time (MAX) from list of report files
"""
# Get report files
n_runs = len(glob(sys.argv[1]+'*.out'))
reports = glob(sys.argv[1]+'*/r-*')
n_reports = len(reports)
if n_reports != n_runs:
raise ValueError("[ERR] report and run number are different: ",n_reports,n_runs)
time_total = []
for report in reports:
with open(report) as r:
lines = r.readlines()
for line in lines:
if "[Time-Profile]" in line:
time= line.split(':')[1].strip('s\n')
if separator1 in time: time = float(time.split(separator1)[0])*60.+\
float(time.split(separator1)[1])
else: time = float(time)
time_total.append(time)
break
n_times = len(time_total)
if n_times!=n_reports:
raise ValueError("[ERR] Parsing problem (number of times %d different from number of reports %d)"%(n_times,n_reports))
return time_total
def plot_scaling(n_tasks,time_total):
"""
Plot scaling figure: times (s) vs n_tasks
"""
import matplotlib.pyplot as plt
import numpy as np
n_tasks = np.array(n_tasks)
time_total = np.array(time_total)
x = sorted(n_tasks)
y = time_total[np.argsort(n_tasks)]
plt.xticks(x)
plt.xlabel("N tasks")
plt.ylabel("Time (s)")
plt.plot(x,y, '.-', c='teal',markersize=10)
plt.savefig('gw_scaling.png')
#plt.show()
if __name__=="__main__":
if len(sys.argv)!=2:
print("Usage: > python parse_ytimings.py header_dir")
print("with argument being head chars of report directory i.e. 'header_dir'='run_MPI'")
exit()
n_tasks, time_dicts = parse_timing_data()
time_total = get_global_time()
plot_scaling(n_tasks,time_total)
```
</details>
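Given a set of runs produced with the parallel-tutorial script above (report directories named `run_MPI<n>_OMP<m>.out`), the script is invoked with the common prefix of those directory names:
```bash=
python parse_ytimings.py run_MPI   # parses all run_MPI* reports and writes gw_scaling.png
```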
#### plot_bands.py
<details>
```python=
import numpy as np
import matplotlib.pyplot as plt
import sys

def plot_bands(data1,data2):
    """
    Plot bands (DFT vs GW)
    """
    def get_high_sym_indices(Npts):
        # Indices of G, M, K, G along the G-M-K-G path:
        # the segment lengths are proportional to |GM|:|MK|:|KG| = sqrt(3):1:2
        den = 3.+np.sqrt(3.)
        x,y,z = [np.sqrt(3.)/den,1./den,2./den]
        return [0,int(x*Npts)-1,int((x+y)*Npts)-1,int(Npts)-1]
    ax = plt.axes()
    ax.set_ylabel("Energy (eV)")
    Spts = get_high_sym_indices(len(data1))
    Svals = [ data1[pt,0] for pt in Spts ]
    ax.set_xticks(Svals)
    ax.set_xticklabels(['G','M','K','G'])
    for v in Svals: ax.axvline(v,c='black')
    Nb = data1.shape[1]-1
    for ib in range(Nb):
        if ib==0:  # label only the first band of each set
            ax.plot(data1[:,0],data1[:,ib+1],'-',c='red',label='dft')
            ax.plot(data2[:,0],data2[:,ib+1],'-',c='blue',label='gw')
        else:
            ax.plot(data1[:,0],data1[:,ib+1],'-',c='red')
            ax.plot(data2[:,0],data2[:,ib+1],'-',c='blue')
    plt.legend()
    plt.savefig('gw_bands.png')
    plt.show()

if __name__=="__main__":
    if len(sys.argv)!=4:
        print("Usage: > python plot_bands.py DFT_bands_file GW_bands_file N_b")
        print("with the first two arguments being the interpolated bands files (DFT, GW)")
        print("and N_b the number of bands")
        exit()
    Nb = int(sys.argv[3])
    cols = [ i for i in range(Nb+1) ]
    dft_bands = np.genfromtxt(sys.argv[1],usecols=cols)
    gw_bands = np.genfromtxt(sys.argv[2],usecols=cols)
    plot_bands(dft_bands,gw_bands)
```
</details>
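Usage example; the file names below are placeholders for the interpolated DFT and GW bands files, and the last argument is the number of bands to plot:
```bash=
python plot_bands.py o-DFT.bands_interpolated o-GW.bands_interpolated 8
```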
###### tags: `Yambo` `GW calculations` `HPC` `tutorial`