Rmpi
From FarmShare
(Created page with "== Rmpi == Rmpi is R with mpi support. On the barley this means OpenMPI. Rmpi can be installed from CRAN if you wish. It is installed on the barley in /mnt/glusterfs/apps/R as ...") |
|||
Line 1: | Line 1: | ||
== Rmpi == | == Rmpi == | ||
- | Rmpi is R with mpi support. On the barley this means OpenMPI. Rmpi can be installed from CRAN if you wish. It is installed on the barley in /mnt/glusterfs/apps/R as a convenience. | + | Rmpi is R with mpi support. On the barley this means OpenMPI. Rmpi can be installed from [http://cran.r-project.org/ CRAN] packages if you wish. It is installed on the barley in /mnt/glusterfs/apps/R as a convenience. |
== setup == | == setup == | ||
- | grid engine submit script (rmpi.submit) <code></code> | + | grid engine submit script (rmpi.submit) <code></code> |
- | <code> | + | <code></code> |
+ | |||
+ | <pre> | ||
# | # | ||
#$ -cwd | #$ -cwd | ||
Line 17: | Line 19: | ||
tmphosts=`mktemp` awk '{ for (i=0; i < $2; ++i) { print $1} }' $PE_HOSTFILE > $tmphosts | tmphosts=`mktemp` awk '{ for (i=0; i < $2; ++i) { print $1} }' $PE_HOSTFILE > $tmphosts | ||
- | echo "Got $NSLOTS slots" echo "jobid $JOB_ID" | + | echo "Got $NSLOTS slots" echo "jobid $JOB_ID" |
+ | </pre> | ||
- | cat $PE_HOSTFILE | + | <pre>cat $PE_HOSTFILE </pre> |
- | </ | + | |
<code>mpirun -np $NSLOTS -machinefile $tmphosts R --no-save -q < ~/Rmpitest.R </code> | <code>mpirun -np $NSLOTS -machinefile $tmphosts R --no-save -q < ~/Rmpitest.R </code> | ||
+ | |||
+ | <code></code> | ||
<code></code> | <code></code> | ||
Line 45: | Line 49: | ||
warning("Rmpi cannot be loaded") | warning("Rmpi cannot be loaded") | ||
- | + | q(save = "no") | |
} | } | ||
Line 58: | Line 62: | ||
#sys.load.image(".RData",TRUE) | #sys.load.image(".RData",TRUE) | ||
- | + | options(echo=FALSE) | |
- | + | .comm <- 1 | |
- | + | mpi.barrier(0) | |
- | + | repeat | |
try(eval(mpi.bcast.cmd(rank=0,comm=.comm)),TRUE) #try(eval(mpi.bcast.cmd(rank=0,comm=.comm),env=sys.parent()),TRUE) | try(eval(mpi.bcast.cmd(rank=0,comm=.comm)),TRUE) #try(eval(mpi.bcast.cmd(rank=0,comm=.comm),env=sys.parent()),TRUE) | ||
#mpi.barrier(.comm) | #mpi.barrier(.comm) | ||
- | + | if (is.loaded("mpi_comm_disconnect")) | |
- | + | mpi.comm.disconnect(.comm) | |
- | + | else mpi.comm.free(.comm) | |
- | + | mpi.quit() | |
} | } | ||
Line 76: | Line 80: | ||
#options(echo=TRUE) | #options(echo=TRUE) | ||
- | + | mpi.barrier(0) | |
- | + | if(mpi.comm.size(0) > 1) | |
- | + | slave.hostinfo(1) | |
} | } | ||
Line 85: | Line 89: | ||
if (is.loaded("mpi_initialize")){ | if (is.loaded("mpi_initialize")){ | ||
- | + | if (mpi.comm.size(1) > 1){ | |
- | + | print("Please use mpi.close.Rslaves() to close slaves") | |
- | + | mpi.close.Rslaves(comm=1) | |
- | + | } | |
- | + | } | |
- | + | print("Please use mpi.quit() to quit R") | |
- | + | mpi.quit() | |
</code> | </code> | ||
+ | |||
+ | <code></code> | ||
<code>} </code> | <code>} </code> | ||
- | <br> <code></code> | + | <br> <code></code> |
+ | |||
+ | <code></code> | ||
<code> | <code> | ||
Line 105: | Line 113: | ||
#Tell all slaves to close down, and exit the program | #Tell all slaves to close down, and exit the program | ||
</code> | </code> | ||
+ | |||
+ | <code></code> | ||
<code>mpi.close.Rslaves() mpi.quit() </code> | <code>mpi.close.Rslaves() mpi.quit() </code> |
Revision as of 00:22, 15 February 2012
Rmpi
Rmpi is R with MPI support. On the barley this means OpenMPI. Rmpi can be installed from CRAN if you wish. It is installed on the barley in /mnt/glusterfs/apps/R as a convenience.
setup
grid engine submit script (rmpi.submit)
#
#$ -cwd
#$ -j y
#$ -S /bin/bash
#$ -N rmpi32
#$ -pe orte 32

# $PE_HOSTFILE lists "host slot-count" pairs; expand it into one hostname
# per slot so mpirun's -machinefile starts the right number of ranks per host.
# NOTE: the assignment and the awk command must be separate statements —
# `tmphosts=`mktemp` awk ...` would only set tmphosts in awk's environment,
# leaving the output redirection with an empty target.
tmphosts=$(mktemp)
awk '{ for (i = 0; i < $2; ++i) { print $1 } }' $PE_HOSTFILE > $tmphosts

echo "Got $NSLOTS slots"
echo "jobid $JOB_ID"

cat $PE_HOSTFILE

# Launch one R process per slot, feeding the Rmpi driver script on stdin.
mpirun -np $NSLOTS -machinefile $tmphosts R --no-save -q < ~/Rmpitest.R
- This R profile can be used when a cluster does not allow spawning or a job
- scheduler is required to launch any parallel jobs. Save this file as
- .Rprofile in the working directory or root directory. On Unix platforms, run
- mpiexec -n [number of CPUs] R --no-save -q
- On Windows with MPICH2, use the mpiexec wrapper and specify a working
- directory that contains .Rprofile.
- This file cannot be used as Rprofile.site, because it will not work there.
- The following system libraries are not loaded automatically, so they must be
- loaded manually.
# Site library paths: the shared Rmpi install plus the system R library.
.libPaths(c("/mnt/glusterfs/apps/R", "/usr/lib/R/library"))

# These base packages are not attached automatically when R is launched
# under mpirun, so load them explicitly.
library(utils)
library(stats)
library(datasets)
library(grDevices)
library(graphics)
library(methods)

# Quit this R session outright if Rmpi cannot be loaded.
if (!invisible(library(Rmpi, logical.return = TRUE))) {
  warning("Rmpi cannot be loaded")
  q(save = "no")
}

# On error, record a flag in the global environment rather than aborting,
# so slave loops keep running after a failed broadcast command.
options(error = quote(assign(".mpi.err", FALSE, env = .GlobalEnv)))

# Duplicate MPI_COMM_WORLD (comm 0) into comm 1 for master/slave traffic.
if (mpi.comm.size(0) > 1)
  invisible(mpi.comm.dup(0, 1))

# Slave ranks (rank > 0): silently loop, executing each command the master
# broadcasts on comm 1, until the master broadcasts a command that breaks
# the loop; then release the communicator and quit.
if (mpi.comm.rank(0) > 0) {
  #sys.load.image(".RData",TRUE)
  options(echo = FALSE)
  .comm <- 1
  mpi.barrier(0)
  repeat
    try(eval(mpi.bcast.cmd(rank = 0, comm = .comm)), TRUE) #try(eval(mpi.bcast.cmd(rank=0,comm=.comm),env=sys.parent()),TRUE)
  #mpi.barrier(.comm)
  if (is.loaded("mpi_comm_disconnect"))
    mpi.comm.disconnect(.comm)
  else mpi.comm.free(.comm)
  mpi.quit()
}

# Master rank (0): report which hosts the slaves are running on.
if (mpi.comm.rank(0) == 0) {
  #options(echo=TRUE)
  mpi.barrier(0)
  if (mpi.comm.size(0) > 1)
    slave.hostinfo(1)
}

# Exit hook: close any remaining slaves and finalize MPI cleanly so the
# user cannot leave orphaned ranks behind by quitting R normally.
.Last <- function() {
  if (is.loaded("mpi_initialize")) {
    if (mpi.comm.size(1) > 1) {
      print("Please use mpi.close.Rslaves() to close slaves")
      mpi.close.Rslaves(comm = 1)
    }
  }
  print("Please use mpi.quit() to quit R")
  mpi.quit()
}
# Tell all slaves to return a message identifying themselves
mpi.remote.exec(paste("I am", mpi.comm.rank(), "of", mpi.comm.size()))

# Tell all slaves to close down, and exit the program
mpi.close.Rslaves()
mpi.quit()