# lit-gpt on DAWN ## setup The following needs adjusting the `HOME_DIR` path. ```{bash} HOME_DIR=/home/cs-laza1/rds-dawn-turing/cs-laza1 cd $HOME_DIR mkdir -p lit_gpt_workspace cd lit_gpt_workspace git clone https://github.com/tomaslaz/lit-GPT.git cd lit-GPT git checkout dawn !ignore git checkout 5e40219de82f15a521f17c71e399cbac895b473c git submodule init git submodule update nanogpt cd .. git clone https://github.com/tomaslaz/minGPT.git cd minGPT/ git checkout dawn cd .. mkdir -p $HOME_DIR/lit_gpt_workspace/conda_env conda create --prefix $HOME_DIR/lit_gpt_workspace/conda_env -y python=3.9 conda activate $HOME_DIR/lit_gpt_workspace/conda_env conda init --all module load default-dawn module load intel-oneapi-tbb/2021.11.0/oneapi/xtkj6nyp intel-oneapi-compilers/2024.0.0/gcc/znjudqsi intel-oneapi-mkl/2024.0.0/oneapi/4n7ruz44 intel-oneapi-mpi/2021.11.0/oneapi/h7nq7sah source /usr/local/dawn/software/spack/spack-views/dawn-test-2023-12-22/intel-oneapi-compilers-2024.0.0/gcc-13.2.0/znjudqsiaf6x5u2rxdtymf6ss55nmimw/compiler/2024.0/env/vars.sh source /usr/local/dawn/software/spack/spack-views/dawn-test-2023-12-22/intel-oneapi-mkl-2024.0.0/oneapi-2024.0.0/4n7ruz44nhbsd5xp4nnz6mgm2z7vqzxs/mkl/2024.0/env/vars.sh source /usr/local/dawn/software/spack/spack-views/dawn-test-2023-12-22/intel-oneapi-compilers-2024.0.0/gcc-13.2.0/znjudqsiaf6x5u2rxdtymf6ss55nmimw/setvars.sh --force export USE_XETLA=OFF export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 python -m pip install --upgrade pip python -m pip install torch==2.0.1a0 torchvision==0.15.2a0 intel-extension-for-pytorch==2.0.120+xpu oneccl-bind-pt==2.0.200 --extra-index-url https://pytorch-extension.intel.com/release-whl-aitools/ python -m pip install -r $HOME_DIR/lit_gpt_workspace/lit-GPT/requirements.txt ``` ## Job submission script The following needs adjusting the `HOME_DIR` path. ``` #!/bin/bash #SBATCH --job-name lit-gpt #SBATCH --account TURING-GPU #SBATCH --partition pvc #SBATCH --time 1:00:0 #SBATCH -N 2 #SBATCH --ntasks-per-node 4 #SBATCH -c 4 #SBATCH --gres=gpu:4 #SBATCH -o slurm.out # Don't tolerate errors set -e HOME_DIR="/home/cs-laza1" # Load modules echo "Load modules" module load intel-oneapi-tbb/2021.11.0/oneapi/xtkj6nyp module load intel-oneapi-compilers/2024.0.0/gcc/znjudqsi module load intel-oneapi-mkl/2024.0.0/oneapi/4n7ruz44 module load intel-oneapi-mpi/2021.11.0/oneapi/h7nq7sah module load gcc/13.2.0/ic25lr2r pushd /usr/local/dawn/software/spack/spack-views/dawn-test-2023-12-22/ source intel-oneapi-compilers-2024.0.0/gcc-13.2.0/znjudqsiaf6x5u2rxdtymf6ss55nmimw/compiler/2024.0/env/vars.sh source intel-oneapi-mkl-2024.0.0/oneapi-2024.0.0/4n7ruz44nhbsd5xp4nnz6mgm2z7vqzxs/mkl/2024.0/env/vars.sh source intel-oneapi-compilers-2024.0.0/gcc-13.2.0/znjudqsiaf6x5u2rxdtymf6ss55nmimw/setvars.sh popd # Activate conda environment echo "Activate conda environment" __conda_setup="$("$HOME_DIR/miniconda3/bin/conda" 'shell.bash' 'hook' 2> /dev/null)" if [ $? -eq 0 ]; then eval "$__conda_setup" else if [ -f "$HOME_DIR/miniconda3/etc/profile.d/conda.sh" ]; then . "$HOME_DIR/miniconda3/etc/profile.d/conda.sh" else export PATH="$HOME_DIR/miniconda3/bin:$PATH" fi fi unset __conda_setup CONDA_ENV=$HOME_DIR/lit-gpt-env/conda_env conda activate ${CONDA_ENV} export PYTHONPATH=$PYTHONPATH:$HOME_DIR/lit-gpt-env/minGPT # Source CCL bindings echo "Source CCL bindings" source $(python -c "import oneccl_bindings_for_pytorch as torch_ccl;print(torch_ccl.cwd)")/env/vars.sh # Set up environment variables # debugging flags export I_MPI_DEBUG=5 export I_MPI_OFFLOAD=1 export CCL_LOG_LEVEL=info export ZES_ENABLE_SYSMAN=1 ulimit -n 1048575 ulimit -u 1000000 # export USE_XETLA=OFF # export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 export CCL_ZE_IPC_EXCHANGE=sockets # COMPOSITE - see devices not tiles # FLAT - see tiles not devices export ZE_FLAT_DEVICE_HIERARCHY=COMPOSITE echo echo GPU Info using ZE_FLAT_DEVICE_HIERARCHY=${ZE_FLAT_DEVICE_HIERARCHY} echo ============================================ xpu-smi discovery --dump 1,2,16,19 echo ============================================ echo SLURM_JOB_NUM_NODES: ${SLURM_JOB_NUM_NODES} echo SLURM_NTASKS_PER_NODE: ${SLURM_NTASKS_PER_NODE} export WORLD_SIZE=$((${SLURM_JOB_NUM_NODES} * ${SLURM_NTASKS_PER_NODE})) echo WORLD_SIZE: ${WORLD_SIZE} # Check that we can call mpirun with something simple. echo echo ============================================ echo echo "MPI test:" echo mpirun -n ${WORLD_SIZE} -ppn ${SLURM_NTASKS_PER_NODE} -prepend-rank hostname echo echo ============================================ # # Getting the IP address of the first node # hostname=$(scontrol show hostname $SLURM_NODELIST | head -n 1) # ip_address=$(getent ahosts $hostname | head -n 1 | awk '{print $1}') # echo # echo "Running on $hostname with IP $ip_address" # echo # echo ============================================ # echo # export MASTER_ADDR=$ip_address # export MASTER_PORT=29502 # GPT-2 configuration CONFIG_LAYERS=12 CONFIG_HEAD=12 CONFIG_EMBD=768 CONFIG_PRECISION=16 CONFIG_BATCH_SIZE=64 CONFIG_STRATEGY=ddp CONFIG_NUM_WORKERS=36 # mpirun -n ${WORLD_SIZE} -ppn ${SLURM_NTASKS_PER_NODE} srun python $HOME_DIR/lit-gpt-env/lit-GPT/train.py \ --enable_progress_bar 0 \ --implementation mingpt \ --model_type None \ --n_layer ${CONFIG_LAYERS} \ --n_head ${CONFIG_HEAD} \ --n_embd ${CONFIG_EMBD} \ --precision ${CONFIG_PRECISION} \ --batch_size ${CONFIG_BATCH_SIZE} \ --max_epochs 1 \ --strategy ${CONFIG_STRATEGY} \ --num_nodes ${SLURM_JOB_NUM_NODES} \ --devices ${WORLD_SIZE} \ --num_workers ${CONFIG_NUM_WORKERS} echo "Deactivate virtual environment" deactivate echo "Done" ```
×
Sign in
Email
Password
Forgot password
or
By clicking below, you agree to our
terms of service
.
Sign in via Facebook
Sign in via Twitter
Sign in via GitHub
Sign in via Dropbox
Sign in with Wallet
Wallet (
)
Connect another wallet
New to HackMD?
Sign up