#pragma section-numbers 2
= MPI-Start as EMI-ES ParallelEnvironment backend =

<<TableOfContents>>

== CREAM ==

CREAM support for the !ParallelEnvironment uses mpi-start out of the box for its implementation; there is no need for extra configuration. The default command line generated by CREAM looks like this (the placeholders in angle brackets are filled in from the !ParallelEnvironment of the job description):

{{{
/usr/bin/mpi-start -t <type> -npnode <processes per slot> -d THREADS_PER_PROCESS=<threads per process>
}}}
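For reference, the fragment below sketches the kind of EMI-ES !ParallelEnvironment element that produces such a command line. This is an illustration, not taken from CREAM documentation: the element names match the EMI-ES ADL !ParallelEnvironment definition (compare the `joboption_penv_procperslot` and `joboption_penv_threadsperslot` variables in the ARC script below), while the values are made up for the example.

{{{#!highlight xml
<!-- Illustrative values only: Type maps to "-t", ProcessesPerSlot to
     "-npnode" and ThreadsPerProcess to "-d THREADS_PER_PROCESS=". -->
<ParallelEnvironment>
  <Type>OpenMPI</Type>
  <ProcessesPerSlot>2</ProcessesPerSlot>
  <ThreadsPerProcess>4</ThreadsPerProcess>
</ParallelEnvironment>
}}}

With these values the generated command line would be `/usr/bin/mpi-start -t openmpi -npnode 2 -d THREADS_PER_PROCESS=4`.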
== UNICORE ==

The UNICORE EMI-ES !ParallelEnvironment is also a complete implementation of the specification. Configuration is done via XML files, where the mapping from the job specification to the final executable is specified. For each MPI flavor that needs to be supported by mpi-start, an entry must be included in the IDB. For example, for OpenMPI:

{{{#!highlight xml
<ee:ExecutionEnvironment xmlns:ee="http://www.unicore.eu/unicore/jsdl-extensions">
  <ee:Name>OpenMPI</ee:Name>
  <ee:Version>1.5.4</ee:Version>
  <ee:Description>Runs OpenMPI job using mpi-start to start the job</ee:Description>
  <ee:ExecutableName>/usr/bin/mpi-start</ee:ExecutableName>
  <ee:CommandlineTemplate>#EXECUTABLE -t openmpi #ARGS #USERCOMMAND #USERARGS</ee:CommandlineTemplate>
  <ee:Argument>
    <ee:Name>Output</ee:Name>
    <ee:IncarnatedValue>-o </ee:IncarnatedValue>
    <ee:ArgumentMetadata>
      <ee:Type>string</ee:Type>
      <ee:Description>Write the job output to a FILE instead of to the standard output stream</ee:Description>
    </ee:ArgumentMetadata>
  </ee:Argument>
  <ee:Argument>
    <ee:Name>PostHook</ee:Name>
    <ee:IncarnatedValue>-post </ee:IncarnatedValue>
    <ee:ArgumentMetadata>
      <ee:Type>string</ee:Type>
      <ee:Description>Use the file as post hook</ee:Description>
    </ee:ArgumentMetadata>
  </ee:Argument>
  <ee:Argument>
    <ee:Name>PreHook</ee:Name>
    <ee:IncarnatedValue>-pre </ee:IncarnatedValue>
    <ee:ArgumentMetadata>
      <ee:Type>string</ee:Type>
      <ee:Description>Use the file as pre hook</ee:Description>
    </ee:ArgumentMetadata>
  </ee:Argument>
  <ee:Argument>
    <ee:Name>Error</ee:Name>
    <ee:IncarnatedValue>-e </ee:IncarnatedValue>
    <ee:ArgumentMetadata>
      <ee:Type>string</ee:Type>
      <ee:Description>Write the job error to a FILE instead of to the standard error stream</ee:Description>
    </ee:ArgumentMetadata>
  </ee:Argument>
  <ee:Option>
    <ee:Name>Verbose</ee:Name>
    <ee:IncarnatedValue>-v</ee:IncarnatedValue>
    <ee:OptionMetadata>
      <ee:Description>Enable verbose mode</ee:Description>
    </ee:OptionMetadata>
  </ee:Option>
  <ee:Option>
    <ee:Name>PerNode</ee:Name>
    <ee:IncarnatedValue>-pnode</ee:IncarnatedValue>
    <ee:OptionMetadata>
      <ee:Description>Start one process per available node</ee:Description>
    </ee:OptionMetadata>
  </ee:Option>
  <ee:Option>
    <ee:Name>PerSocket</ee:Name>
    <ee:IncarnatedValue>-psocket</ee:IncarnatedValue>
    <ee:OptionMetadata>
      <ee:Description>Start one process per available socket</ee:Description>
    </ee:OptionMetadata>
  </ee:Option>
  <ee:Option>
    <ee:Name>PerCore</ee:Name>
    <ee:IncarnatedValue>-pcore</ee:IncarnatedValue>
    <ee:OptionMetadata>
      <ee:Description>Start one process per available core</ee:Description>
    </ee:OptionMetadata>
  </ee:Option>
</ee:ExecutionEnvironment>
}}}

== ARC ==

ARC supports the !ParallelEnvironment through the !RunTimeEnvironments. You need to create an RTE for the !ParallelEnvironment that uses mpi-start:

{{{#!highlight sh
#!/bin/sh
# RTE stages: "0" runs at submission time (job options may still be
# modified), "1" just before and "2" just after the job execution.
case "$1" in
    0)
        TYPE=`echo "$joboption_penv_type" | tr '[:upper:]' '[:lower:]'`
        OPTS="-t $TYPE"
        if [ "x$joboption_penv_procperslot" != "x" ] ; then
            OPTS="$OPTS -npnode $joboption_penv_procperslot"
        fi
        if [ "x$joboption_penv_threadsperslot" != "x" ] ; then
            OPTS="$OPTS -d THREADS_PER_CORE=$joboption_penv_threadsperslot"
        fi
        joboption_args="mpi-start $OPTS -- $joboption_args"
        ;;
    1)
        ;;
    2)
        ;;
    *)
        return 1
        ;;
esac
}}}

In the user job, include both the RTE (e.g. `MPISTART`) and the PE in the job description (only the relevant elements are shown):

{{{#!highlight xml
<RuntimeEnvironment>
  <Name>MPISTART</Name>
</RuntimeEnvironment>
<ParallelEnvironment>
  <Type>OpenMPI</Type>
</ParallelEnvironment>
}}}

== Testing ==

Testing the use of PEs with mpi-start was done as part of the mpi-start 1.5.0 testing. Check [[https://bitbucket.org/enolfc/mpi-start/wiki/emi-MPIv1.5.0/integration|the integration tests]] for details.

== Sample Job ==

This is a sample job that can be submitted to the EMI-ES endpoints:

{{{#!highlight xml
<ActivityDescription xmlns="http://www.eu-emi.eu/es/2010/12/adl">
  <ActivityIdentification>
    <Name>test job</Name>
    <Description>A test job showing the features of EMI-ES</Description>
    <Type>single</Type>
    <Annotation>test</Annotation>
  </ActivityIdentification>
  <Application>
    <Executable>
      <Path>cpi.c</Path>
    </Executable>
    <Error>std.err</Error>
    <Output>std.out</Output>
    <Environment>
      <Name>I2G_MPI_PRE_RUN_HOOK</Name>
      <Value>pre.sh</Value>
    </Environment>
    <Environment>
      <Name>I2G_MPI_START_VERBOSE</Name>
      <Value>1</Value>
    </Environment>
  </Application>
  <Resources>
    <SlotRequirement>
      <NumberOfSlots>2</NumberOfSlots>
    </SlotRequirement>
    <ParallelEnvironment>
      <Type>OpenMPI</Type>
    </ParallelEnvironment>
  </Resources>
  <DataStaging>
    <InputFile>
      <Name>pre.sh</Name>
      <Source>
        <URI>pre.sh</URI>
      </Source>
    </InputFile>
    <InputFile>
      <Name>cpi.c</Name>
      <Source>
        <URI>cpi.c</URI>
      </Source>
    </InputFile>
  </DataStaging>
</ActivityDescription>
}}}

The hook (`pre.sh`) is the following:

{{{#!highlight sh
#!/bin/sh

pre_run_hook () {
    # Compile the program.
    info_msg "Compiling ${I2G_MPI_APPLICATION}"
    export I2G_MPI_APPLICATION=`echo $I2G_MPI_APPLICATION | sed -e "s/\.c$//"`

    # Actually compile the program.
    cmd="${MPI_MPICC} ${MPI_MPICC_OPTS} -o ${I2G_MPI_APPLICATION} ${I2G_MPI_APPLICATION}.c"
    info_msg $cmd
    $cmd
    st=$?
    if [ $st -ne 0 ]; then
        error_msg "Error compiling program. Exiting..."
        return $st
    fi

    # Everything's OK.
    info_msg "Successfully compiled ${I2G_MPI_APPLICATION}"
    return 0
}
}}}

And the C code (`cpi.c`):

{{{#!highlight c
#include "mpi.h"
#include <stdio.h>
#include <math.h>

double f( double );
double f( double a )
{
    return (4.0 / (1.0 + a*a));
}

int main( int argc, char *argv[])
{
    int n_intervals = 16384;
    int done = 0, n = 0, myid, numprocs, i;
    double PI25DT = 3.141592653589793238462643;
    double mypi, pi, h, sum, x;
    double startwtime = 0.0, endwtime;
    int namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    MPI_Get_processor_name(processor_name,&namelen);

    fprintf(stderr,"Process %d on %s: n=%d\n",myid, processor_name,n);

    if (numprocs > 1) {
        if( myid == 0 )
            fprintf(stderr,"Using %d intervals\n",n_intervals);
        n = 0;
        while (!done) {
            if (myid == 0) {
                startwtime = MPI_Wtime();
            }
            if( n == 0 )
                n = n_intervals;
            else
                n = 0;
            MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
            if (n == 0)
                done = 1;
            else {
                h   = 1.0 / (double) n;
                sum = 0.0;
                for (i = myid + 1; i <= n; i += numprocs) {
                    x = h * ((double)i - 0.5);
                    sum += f(x);
                }
                mypi = h * sum;
                MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
                if (myid == 0) {
                    printf("pi is approximately %.16f, Error is %.16f\n",
                           pi, fabs(pi - PI25DT));
                    endwtime = MPI_Wtime();
                    printf("wall clock time = %f\n", endwtime-startwtime);
                }
            }
        }
    }
    else {
        fprintf(stderr,"Only 1 process, not doing anything\n");
    }
    MPI_Finalize();
    return 0;
}
}}}
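Before going through an EMI-ES endpoint, the hook and program can also be exercised by calling mpi-start by hand. The following is a minimal sketch, assuming mpi-start and an MPI implementation are installed on the local machine and `pre.sh` and `cpi.c` are in the current directory; it only uses options that appear in the UNICORE template and ARC script above (`-t`, `-pre`, `-v`, and `--` to separate mpi-start options from the user command):

{{{#!highlight sh
# Sketch of a local test run (assumes mpi-start and OpenMPI are
# installed). The pre hook compiles cpi.c, then mpi-start launches
# the resulting binary with the openmpi plugin in verbose mode.
/usr/bin/mpi-start -t openmpi -pre pre.sh -v -- cpi.c
}}}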