This is a Slurm batch script (cpu.sl) for CPU stress testing with MPI. Submit it with sbatch, passing the C source file and the workload exponent (the program runs 10^exponent iterations in total) as arguments:
sbatch -N2 -pbigmem --ntasks-per-node=2 -t0:10:00 --mem=1G cpu.sl cpu.c 10
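The submission flags can also be baked into the script header instead of being passed on the command line. A minimal sketch of the equivalent directives (the bigmem partition name is site-specific and assumed here):

#SBATCH --nodes=2
#SBATCH --partition=bigmem
#SBATCH --ntasks-per-node=2
#SBATCH --time=0:10:00
#SBATCH --mem=1G

The script itself: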
#!/bin/bash
#SBATCH --job-name=stress_test
#SBATCH --output=cpu.out
#SBATCH --error=cpu.err
# Get the source file and exponent from the command line arguments
SRC="$1" # Source file (e.g., cpu.c)
N_EXP="$2" # Exponent (e.g., 8)
# Check if both arguments are provided
if [ -z "$SRC" ] || [ -z "$N_EXP" ]; then
    echo "Usage: sbatch <script_name> <source_file> <exponent>"
    exit 1
fi
# Extract the object file name from the source file
OBJ=$(basename "$SRC" .c) # Removes .c and uses the base name
# Compile the program
mpicc -o "${OBJ}" "${SRC}" -lm || { echo "Compilation failed" >&2; exit 1; }
# Run the program with srun; it inherits the node and task counts from the
# job allocation. The MPI plugin (pmix_v4 here) depends on the site's Slurm
# build; list the available plugins with `srun --mpi=list`.
srun --mpi=pmix_v4 "./${OBJ}" "${N_EXP}"
# Remove the object file (optional)
rm "${OBJ}"
The MPI program cpu.c is as follows:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
void stress_test(int rank, long workload_start, long workload_end) {
    double result = 0.0;
    for (long i = workload_start; i < workload_end; i++) {
        result += sin(rank * i * 0.0001) * cos(i * 0.0001);
        result += exp(rank * 0.0001) / (i + 1);
        if (i % 100000 == 0) {
            result = fmod(result, 1000.0);
        }
    }
    printf("Process %d completed. Result = %f\n", rank, result);
}
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    int world_size, world_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    if (world_size < 2) {
        if (world_rank == 0) {
            fprintf(stderr, "This program requires at least 2 processes.\n");
        }
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (argc != 2) {
        if (world_rank == 0) {
            fprintf(stderr, "Usage: %s <exponent>\n", argv[0]);
        }
        MPI_Finalize();
        return 1;
    }

    int exponent = atoi(argv[1]);
    if (exponent <= 0) {
        if (world_rank == 0) {
            fprintf(stderr, "Exponent must be a positive integer.\n");
        }
        MPI_Finalize();
        return 1;
    }
    /* pow() returns a double; round before converting so a result like
       9999.999... does not truncate to the wrong iteration count. */
    long total_workload = (long)llround(pow(10.0, exponent));
    long workload_per_process = total_workload / world_size;
    long remainder = total_workload % world_size; // leftover iterations

    /* Distribute the remainder across the first `remainder` ranks: each of
       those ranks takes one extra iteration, and every later rank's start
       is shifted by the extras handed out before it, so the ranges tile
       [0, total_workload) with no gaps or overlaps. */
    long extra = (world_rank < remainder) ? world_rank : remainder;
    long workload_start = world_rank * workload_per_process + extra;
    long workload_end = workload_start + workload_per_process
                        + ((world_rank < remainder) ? 1 : 0);
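    /* Worked example of the split (illustrative numbers): with
       total_workload = 10 and world_size = 3, workload_per_process = 3
       and remainder = 1, so rank 0 gets [0, 4), rank 1 gets [4, 7), and
       rank 2 gets [7, 10) -- every iteration covered exactly once. */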
    stress_test(world_rank, workload_start, workload_end);

    MPI_Finalize();
    return 0;
}
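Before queuing a full job, the program can be sanity-checked with a small interactive run where the cluster allows it (four ranks and exponent 8 are arbitrary test values):

mpicc -o cpu cpu.c -lm
mpirun -np 4 ./cpu 8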