=== Run GNU Parallel in a Slurm script ===
==== Single Node ====
The job script below requests 1 node with 20 cores (one whole node): 5 tasks per node × 4 CPUs per task = 20 cores. Each task is assumed to be multi-threaded; '''OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK''' sets the number of OpenMP threads per task, and '''-j $SLURM_NTASKS''' tells GNU Parallel how many tasks to run concurrently.
{{{
#!/bin/bash
#SBATCH --partition=defq          # Partition
#SBATCH --qos=normal              # Quality of Service
#SBATCH --job-name=GNU_Parallel   # Job Name
#SBATCH --time=00:10:00           # WallTime
#SBATCH --nodes=1                 # Number of Nodes
#SBATCH --ntasks-per-node=5       # Number of tasks per node
#SBATCH --cpus-per-task=4         # Number of CPUs per task (OpenMP threads)
#SBATCH --gres=mic:0              # Number of Co-Processors

module load parallel

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

# Record current environment variables in ~/.parallel/ignored_vars (see man parallel)
parallel --record-env

parallel --joblog log \
         -j $SLURM_NTASKS \
         --workdir $SLURM_SUBMIT_DIR \
         --env OMP_NUM_THREADS \
         sh ./run_hostname.sh {} ::: `seq 1 100`
}}}
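Assuming the script above is saved as '''parallel_single.sh''' (a hypothetical filename), it is submitted like any other batch job, and the '''--joblog log''' option leaves a per-task accounting file (start time, runtime, exit code) in the submission directory:
{{{
sbatch parallel_single.sh   # submit the batch job (hypothetical filename)
squeue -u $USER             # check the job's state in the queue
cat log                     # per-task log written by --joblog
}}}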

The example task script, '''run_hostname.sh''', is:
{{{
#!/bin/bash
hostname     # print the node this task runs on
echo $1      # print the task's argument (here: 1..100)
sleep 1
}}}
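The wrapper script is optional; as a minimal sketch, the same three commands could instead be passed to GNU Parallel inline (this variant is an illustration, not part of the job script above):
{{{
parallel --joblog log \
         -j $SLURM_NTASKS \
         --workdir $SLURM_SUBMIT_DIR \
         --env OMP_NUM_THREADS \
         'hostname; echo {}; sleep 1' ::: `seq 1 100`
}}}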

==== Multiple Nodes ====
The job script below requests 4 nodes with 20 cores each. Each task is assumed to be multi-threaded; '''OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK''' sets the number of OpenMP threads per task, and '''-j $TASKS_PER_NODE''' tells GNU Parallel how many tasks to run concurrently on each node, where '''TASKS_PER_NODE=`echo $SLURM_NTASKS / $SLURM_NNODES | bc`'''. '''scontrol show hostname $SLURM_NODELIST > $MACHINEFILE''' expands the allocated node list into a file with one hostname per line, which is passed to GNU Parallel with '''--slf $MACHINEFILE'''; GNU Parallel then dispatches tasks to those nodes over ssh.
{{{
#!/bin/bash
#SBATCH --partition=defq          # Partition
#SBATCH --qos=normal              # Quality of Service
#SBATCH --job-name=GNU_Parallel   # Job Name
#SBATCH --time=00:10:00           # WallTime
#SBATCH --nodes=4                 # Number of Nodes
#SBATCH --ntasks-per-node=5       # Number of tasks per node
#SBATCH --cpus-per-task=4         # Number of CPUs per task (OpenMP threads)
#SBATCH --gres=mic:0              # Number of Co-Processors

module load parallel

# List the allocated nodes, one hostname per line, for GNU Parallel's --slf option
MACHINEFILE="machinefile"
scontrol show hostname $SLURM_NODELIST > $MACHINEFILE
cat $MACHINEFILE

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

# Number of concurrent tasks per node
TASKS_PER_NODE=`echo $SLURM_NTASKS / $SLURM_NNODES | bc`
echo "TASKS_PER_NODE=" $TASKS_PER_NODE

# Record current environment variables in ~/.parallel/ignored_vars (see man parallel)
parallel --record-env

parallel --joblog log \
         -j $TASKS_PER_NODE \
         --slf $MACHINEFILE \
         --workdir $SLURM_SUBMIT_DIR \
         --sshdelay 0.1 \
         --env OMP_NUM_THREADS \
         sh ./run_hostname.sh {} ::: `seq 1 100`

echo "took $SECONDS sec"
}}}
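For the request above (4 nodes × 5 tasks per node), '''$SLURM_NTASKS''' is 20 and '''$SLURM_NNODES''' is 4, so '''TASKS_PER_NODE''' evaluates to 20 / 4 = 5 concurrent tasks on each node. The machinefile is simply one allocated hostname per line; the node names below are hypothetical examples of what '''scontrol show hostname''' produces:
{{{
$ scontrol show hostname node[01-04]   # hypothetical compressed node list
node01
node02
node03
node04
}}}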