Rmpi
== Rmpi ==
Rmpi adds MPI support to R; on barley this means OpenMPI. You can install Rmpi from [http://cran.r-project.org/ CRAN] yourself if you wish, but it is also installed on barley in /mnt/glusterfs/apps/R as a convenience.
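If you would rather install your own copy from CRAN, here is a minimal sketch (the personal library path is an assumption, and Rmpi may also need configure.args pointing at the OpenMPI headers on your system):
<pre>
# Run once in an interactive R session on barley.
# "~/R/library" is an assumed personal library directory; create it first.
install.packages("Rmpi",
                 lib   = "~/R/library",
                 repos = "http://cran.r-project.org")
</pre>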
== setup ==
grid engine submit script (rmpi.submit):
<pre>
#!/bin/bash
#$ -cwd
#$ -j y
#$ -S /bin/bash
#$ -N rmpi32
#$ -pe orte 32

# $PE_HOSTFILE lists one line per host ("hostname slots ..."); expand it into
# a machinefile with one line per slot for mpirun.
tmphosts=`mktemp`
awk '{ for (i = 0; i < $2; ++i) { print $1 } }' $PE_HOSTFILE > $tmphosts

echo "Got $NSLOTS slots"
echo "jobid $JOB_ID"
cat $PE_HOSTFILE

mpirun -np $NSLOTS -machinefile $tmphosts R --no-save -q < ~/Rmpitest.R
</pre>
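The #$ directives request 32 slots in the orte parallel environment. Submit the script with qsub rmpi.submit; with -cwd and -j y, the combined output (including the slave host report printed by the R profile below) lands in a file named rmpi32.o$JOB_ID in the submission directory.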
R profile (save as .Rprofile in the working directory):
<pre>
# This R profile can be used when a cluster does not allow spawning or a job
# scheduler is required to launch any parallel jobs. Save this file as
# .Rprofile in the working directory or home directory. On a Unix platform, run
#   mpiexec -n [number of CPUs] R --no-save -q
# On a Windows platform with MPICH2, use the mpiexec wrapper and specify a
# working directory containing .Rprofile.
# It cannot be used as Rprofile.site because it will not work.

# The following system libraries are not loaded automatically, so manual loads
# are needed.
.libPaths(c("/mnt/glusterfs/apps/R", "/usr/lib/R/library"))
library(utils)
library(stats)
library(datasets)
library(grDevices)
library(graphics)
library(methods)

if (!invisible(library(Rmpi, logical.return = TRUE))){
    warning("Rmpi cannot be loaded")
    q(save = "no")
}

options(error = quote(assign(".mpi.err", FALSE, env = .GlobalEnv)))

if (mpi.comm.size(0) > 1)
    invisible(mpi.comm.dup(0, 1))

# Slave ranks sit in a loop executing commands broadcast from the master.
if (mpi.comm.rank(0) > 0){
    #sys.load.image(".RData", TRUE)
    options(echo = FALSE)
    .comm <- 1
    mpi.barrier(0)
    repeat
        try(eval(mpi.bcast.cmd(rank = 0, comm = .comm)), TRUE)
    #try(eval(mpi.bcast.cmd(rank=0,comm=.comm),env=sys.parent()),TRUE)
    #mpi.barrier(.comm)
    if (is.loaded("mpi_comm_disconnect"))
        mpi.comm.disconnect(.comm)
    else mpi.comm.free(.comm)
    mpi.quit()
}

# The master rank reports which hosts the slaves are running on.
if (mpi.comm.rank(0) == 0) {
    #options(echo=TRUE)
    mpi.barrier(0)
    if (mpi.comm.size(0) > 1)
        slave.hostinfo(1)
}

.Last <- function(){
    if (is.loaded("mpi_initialize")){
        if (mpi.comm.size(1) > 1){
            print("Please use mpi.close.Rslaves() to close slaves")
            mpi.close.Rslaves(comm = 1)
        }
    }
    print("Please use mpi.quit() to quit R")
    mpi.quit()
}
</pre>
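With this profile in place, every R process started by mpirun reads it at startup: rank 0 becomes the master and runs the commands piped in from Rmpitest.R, while every other rank stays in the broadcast loop as a slave until the master shuts it down.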
test script (Rmpitest.R):
<pre>
# Tell all slaves to return a message identifying themselves
mpi.remote.exec(paste("I am", mpi.comm.rank(), "of", mpi.comm.size()))

# Tell all slaves to close down, and exit the program
mpi.close.Rslaves()
mpi.quit()
</pre>
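The identification line only confirms that the slaves are alive. A minimal sketch of actually distributing work (the computation is illustrative, not part of the FarmShare setup) would go into Rmpitest.R before the shutdown calls:
<pre>
# Illustrative only: square the numbers 1..32, split across the slaves.
squares <- mpi.parSapply(1:32, function(x) x * x)
print(squares)
</pre>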