= OpenMP C =

If you have not done so yet, download the sample files with:
{{{#!sh
git clone https://hidekiCCS:@bitbucket.org/hidekiCCS/hpc-workshop.git
}}}

== Hello World ==
'''hpc-workshop/SimpleExample/C/hello_OMP'''
{{{#!C
/* hello_omp.c: display a message on the screen */
#include <stdio.h>
#include <omp.h>

int main() {
    int id, nthreads;
    printf("C Start\n");
#pragma omp parallel private(id)
    {
        id = omp_get_thread_num();
        printf("hello from %d\n", id);
#pragma omp barrier
        if (id == 0) {
            nthreads = omp_get_num_threads();
            printf("%d threads said hello!\n", nthreads);
        }
    }
    printf("End\n");
    return 0;
}
}}}

=== Compile with GNU C ===
{{{#!sh
gcc hello_omp.c -fopenmp
}}}

=== Compile with Intel C ===
{{{#!sh
module load intel-psxe
icc hello_omp.c -openmp
}}}

=== Example of Jobscript ===
'''slurmscript'''
{{{#!sh
#!/bin/bash
#SBATCH --partition=defq           # Partition (default is 'defq')
#SBATCH --qos=normal               # Quality of Service
#SBATCH --job-name=helloC_OMP      # Job Name
#SBATCH --time=00:10:00            # WallTime
#SBATCH --nodes=1                  # Number of Nodes
#SBATCH --ntasks-per-node=1        # Number of tasks (MPI processes)
#SBATCH --cpus-per-task=4          # Number of processors per task (OpenMP threads)
#SBATCH --gres=mic:0               # Number of Co-Processors

#module load intel-psxe

pwd
echo "DIR=" $SLURM_SUBMIT_DIR
echo "TASKS_PER_NODE=" $SLURM_TASKS_PER_NODE
echo "NNODES=" $SLURM_NNODES
echo "NTASKS=" $SLURM_NTASKS
echo "JOB_CPUS_PER_NODE=" $SLURM_JOB_CPUS_PER_NODE
echo $SLURM_NODELIST

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
./a.out
echo "End of Job"
}}}

Submit the job:
{{{
sbatch slurmscript
}}}

Check the status of your jobs:
{{{
squeue -u USERNAME
}}}
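Once the job finishes, Slurm writes the script's stdout to a file named `slurm-<jobid>.out` in the submit directory by default. The job id below is only a placeholder; use the one printed by `sbatch`:
{{{#!sh
cat slurm-12345.out   # replace 12345 with the job id reported by sbatch
}}}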
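If you just want to sanity-check the program without going through the scheduler, you can build and run it in an interactive shell. This is a minimal sketch assuming GNU C is available and a session with at least four cores; the thread count is just an example:
{{{#!sh
gcc hello_omp.c -fopenmp -o hello_omp   # same flags as above, named output
export OMP_NUM_THREADS=4                # example thread count; match your session's cores
./hello_omp
}}}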