# hemisphere.R: compute a hemisphere of radius 30 on an integer grid, then
# write a perspective plot and a contour plot of it to EPS files
positive_integer_radius = 30
n = positive_integer_radius * 2 + 1
index = matrix((0:(n * n - 1)) %% n - positive_integer_radius, nrow = n)
x = 0:(n - 1) - positive_integer_radius
y = x
z = sqrt(positive_integer_radius * positive_integer_radius - index^2 - t(index)^2)
z[is.na(z)] = 0

postscript("perspective.eps", horizontal = FALSE, height = 4, width = 5, pointsize = 10)
persp(x, y, z, phi = 30, theta = 0, lphi = 60, ltheta = 70, border = NA,
    col = "dark blue", shade = 1, scale = FALSE)
dev.off()

postscript("contour.eps", horizontal = FALSE, height = 4, width = 5, pointsize = 10)
contour(x, y, z)
dev.off()
#!/bin/bash
#SBATCH --job-name=hemisphere
#SBATCH --output=slurm.out
#SBATCH --error=slurm.err
#SBATCH --partition=develop

R --no-save < hemisphere.R
[araim1@tara-fe1 hemisphere-R]$ sbatch run.slurm
sbatch: Submitted batch job 2623
[araim1@tara-fe1 hemisphere-R]$
[araim1@tara-fe1 hemisphere-R]$ ls
contour.eps   hemisphere.gif  perspective.eps  slurm.out
hemisphere.R  run.slurm       slurm.err
[araim1@tara-fe1 hemisphere-R]$
[araim1@tara-fe1 ~]$ switcher mpi = gcc-openmpi-1.3.3-p1
[araim1@tara-fe1 ~]$ switcher_reload
R_LIBS=/usr/cluster/contrib/gcc-openmpi-1.3.3/Rlibs
R_LIBS=/usr/cluster/contrib/gcc-openmpi-1.3.3/Rlibs:/path/to/more/libs
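As a quick sketch of how a personal library directory can sit alongside the system Rlibs directory (the ~/Rlibs path and the abind package below are only examples, not a site convention), you can install a package into a directory you own and load it once that directory is listed in R_LIBS as shown above:

# Hypothetical personal library; any directory you can write to will work,
# provided it also appears in R_LIBS when R starts
dir.create(path.expand("~/Rlibs"), showWarnings = FALSE)
install.packages("abind", lib = "~/Rlibs", repos = "http://cran.r-project.org")

.libPaths()        # the personal directory should now appear in the search path
library(abind)     # loads from the personal library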
[araim1@tara-fe1 ~]$ R

R version 2.11.1 (2010-05-31)
Copyright (C) 2010 The R Foundation for Statistical Computing
ISBN 3-900051-07-0
...
> .libPaths()
[1] "/usr/cluster/contrib/gcc-openmpi-1.3.3/Rlibs"
[2] "/usr/lib64/R/library"
> library(snow)
> library(Rmpi)
library(Rmpi)
library(snow)

# Initialize SNOW using MPI communication. The first line will get the
# number of MPI processes the scheduler assigned to us. Everything else
# is standard SNOW
np <- mpi.universe.size()
cluster <- makeMPIcluster(np)

# Print the hostname for each cluster member
sayhello <- function()
{
    info <- Sys.info()[c("nodename", "machine")]
    paste("Hello from", info[1], "with CPU type", info[2])
}

names <- clusterCall(cluster, sayhello)
print(unlist(names))

# Compute row sums in parallel using all processes,
# then a grand sum at the end on the master process
parallelSum <- function(m, n)
{
    A <- matrix(rnorm(m*n), nrow = m, ncol = n)
    row.sums <- parApply(cluster, A, 1, sum)
    print(sum(row.sums))
}

parallelSum(500, 500)

stopCluster(cluster)
mpi.exit()
#!/bin/bash
#SBATCH --job-name=snow-mpi-test
#SBATCH --output=slurm.out
#SBATCH --error=slurm.err
#SBATCH --partition=develop
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4

mpirun -np 1 R --no-save < snow-test.R
[araim1@tara-fe1 snow-mpi-test]$ sbatch run.slurm
sbatch: Submitted batch job 2648
[araim1@tara-fe1 snow-mpi-test]$
[araim1@tara-fe1 SNOW]$ cat slurm.out
[... R disclaimer message ...]
> library(Rmpi)
> library(snow)
>
> # Initialize SNOW using MPI communication. The first line will get the
> # number of MPI processes the scheduler assigned to us. Everything else
> # is standard SNOW
> np <- mpi.universe.size()
> cluster <- makeMPIcluster(np)
        8 slaves are spawned successfully. 0 failed.
>
> # Print the hostname for each cluster member
> sayhello <- function()
+ {
+     info <- Sys.info()[c("nodename", "machine")]
+     paste("Hello from", info[1], "with CPU type", info[2])
+ }
>
> names <- clusterCall(cluster, sayhello)
> print(unlist(names))
[1] "Hello from n1 with CPU type x86_64" "Hello from n1 with CPU type x86_64"
[3] "Hello from n1 with CPU type x86_64" "Hello from n2 with CPU type x86_64"
[5] "Hello from n2 with CPU type x86_64" "Hello from n2 with CPU type x86_64"
[7] "Hello from n2 with CPU type x86_64" "Hello from n1 with CPU type x86_64"
>
> # Compute row sums in parallel using all processes,
> # then a grand sum at the end on the master process
> parallelSum <- function(m, n)
+ {
+     A <- matrix(rnorm(m*n), nrow = m, ncol = n)
+     row.sums <- parApply(cluster, A, 1, sum)
+     print(sum(row.sums))
+ }
>
> parallelSum(500, 500)
[1] 13.40937
>
> stopCluster(cluster)
[1] 1
> mpi.exit()
[1] "Detaching Rmpi. Rmpi cannot be used unless relaunching R."
>
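Once the cluster object exists, any of SNOW's higher-level functions can be used in the same way. The following is a minimal sketch, not part of the run above, that estimates pi by Monte Carlo with parSapply; the sample size and the names mc.pi and estimates are made up for illustration. For reproducible work you would normally also initialize independent random-number streams on the workers (for example with SNOW's clusterSetupRNG), which this sketch omits.

library(Rmpi)
library(snow)

np <- mpi.universe.size()
cluster <- makeMPIcluster(np)

# Each worker draws its own points in the unit square and returns an
# estimate of pi; the master averages the per-worker estimates
mc.pi <- function(n)
{
    u <- matrix(runif(2 * n), ncol = 2)
    4 * mean(rowSums(u^2) <= 1)
}

estimates <- parSapply(cluster, rep(100000, np), mc.pi)
print(mean(estimates))

stopCluster(cluster)
mpi.exit()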
library(Rmpi)

mpi.spawn.Rslaves(needlog = FALSE)

mpi.bcast.cmd( id <- mpi.comm.rank() )
mpi.bcast.cmd( np <- mpi.comm.size() )
mpi.bcast.cmd( host <- mpi.get.processor.name() )
result <- mpi.remote.exec(paste("I am", id, "of", np, "running on", host))

print(unlist(result))

mpi.close.Rslaves(dellog = FALSE)
mpi.exit()
When the slaves are spawned, you may see a warning like the following in your error output:

    An MPI process has executed an operation involving a call to the
    "fork()" system call to create a child process. Open MPI is currently
    operating in a condition that could result in memory corruption or
    other system errors...

The option "needlog = FALSE" tells Rmpi not to create the extra log files it normally writes for each spawned slave. If you would like to see the log files along with your results, do not specify this option.
#!/bin/bash
#SBATCH --job-name=Rmpi_hello
#SBATCH --output=slurm.out
#SBATCH --error=slurm.err
#SBATCH --partition=develop
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4

mpirun -np 1 R --no-save < hello.R
[araim1@tara-fe1 Rmpi-test]$ cat slurm.out
[... R disclaimer message ...]
> library(Rmpi)
> mpi.spawn.Rslaves(needlog = FALSE)
        8 slaves are spawned successfully. 0 failed.
master (rank 0, comm 1) of size 9 is running on: n1
slave1 (rank 1, comm 1) of size 9 is running on: n1
slave2 (rank 2, comm 1) of size 9 is running on: n1
slave3 (rank 3, comm 1) of size 9 is running on: n1
slave4 (rank 4, comm 1) of size 9 is running on: n2
slave5 (rank 5, comm 1) of size 9 is running on: n2
slave6 (rank 6, comm 1) of size 9 is running on: n2
slave7 (rank 7, comm 1) of size 9 is running on: n2
slave8 (rank 8, comm 1) of size 9 is running on: n1
>
> mpi.bcast.cmd( id <- mpi.comm.rank() )
> mpi.bcast.cmd( np <- mpi.comm.size() )
> mpi.bcast.cmd( host <- mpi.get.processor.name() )
> result <- mpi.remote.exec(paste("I am", id, "of", np, "running on", host))
>
> print(unlist(result))
                     slave1                      slave2
"I am 1 of 9 running on n1" "I am 2 of 9 running on n1"
                     slave3                      slave4
"I am 3 of 9 running on n1" "I am 4 of 9 running on n2"
                     slave5                      slave6
"I am 5 of 9 running on n2" "I am 6 of 9 running on n2"
                     slave7                      slave8
"I am 7 of 9 running on n2" "I am 8 of 9 running on n1"
>
> mpi.close.Rslaves(dellog = FALSE)
[1] 1
> mpi.exit()
[1] "Detaching Rmpi. Rmpi cannot be used unless relaunching R."
>
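Besides broadcasting commands, Rmpi can also broadcast R objects from the master into each slave's workspace with mpi.bcast.Robj2slave. The snippet below is a small sketch along the lines of the hello example; the params object and its contents are made up for illustration.

library(Rmpi)

mpi.spawn.Rslaves(needlog = FALSE)

# Push an object from the master into each slave's global environment,
# then have every slave report part of its copy
params <- list(mean = 0, sd = 2)
mpi.bcast.Robj2slave(params)

result <- mpi.remote.exec(paste("sd on this slave is", params$sd))
print(unlist(result))

mpi.close.Rslaves(dellog = FALSE)
mpi.exit()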
library(Rmpi)

mpi.spawn.Rslaves(needlog = FALSE)

mpi.bcast.cmd( id <- mpi.comm.rank() )
mpi.bcast.cmd( np <- mpi.comm.size() )
mpi.bcast.cmd( host <- mpi.get.processor.name() )
result <- mpi.remote.exec(paste("I am", id, "of", np, "running on", host))
print(unlist(result))

# Sample one normal observation on the master and each slave
x <- rnorm(1)
mpi.bcast.cmd(x <- rnorm(1))

# Gather the entire x vector (by default to process 0, the master)
mpi.bcast.cmd(mpi.gather.Robj(x))
y <- mpi.gather.Robj(x)
print(unlist(y))

# Sum the x vector together, storing the result on process 0 by default
mpi.bcast.cmd(mpi.reduce(x, op = "sum"))
z <- mpi.reduce(x, op = "sum")
print(z)

mpi.close.Rslaves(dellog = FALSE)
mpi.exit()
#!/bin/bash
#SBATCH --job-name=Rmpi_test
#SBATCH --output=slurm.out
#SBATCH --error=slurm.err
#SBATCH --partition=develop
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8

mpirun -np 1 R --no-save < driver.R
[araim1@tara-fe1 Rmpi-comm-test]$ cat slurm.out
[... R disclaimer message ...]
> library(Rmpi)
>
> mpi.spawn.Rslaves(needlog = FALSE)
        8 slaves are spawned successfully. 0 failed.
master (rank 0, comm 1) of size 9 is running on: n1
slave1 (rank 1, comm 1) of size 9 is running on: n1
... ... ...
slave8 (rank 8, comm 1) of size 9 is running on: n1
>
> mpi.bcast.cmd( id <- mpi.comm.rank() )
> mpi.bcast.cmd( np <- mpi.comm.size() )
> mpi.bcast.cmd( host <- mpi.get.processor.name() )
> result <- mpi.remote.exec(paste("I am", id, "of", np, "running on", host))
> print(unlist(result))
                     slave1                      slave2
"I am 1 of 9 running on n1" "I am 2 of 9 running on n1"
                     slave3                      slave4
"I am 3 of 9 running on n1" "I am 4 of 9 running on n1"
                     slave5                      slave6
"I am 5 of 9 running on n1" "I am 6 of 9 running on n1"
                     slave7                      slave8
"I am 7 of 9 running on n1" "I am 8 of 9 running on n1"
>
> # Sample one normal observation on the master and each slave
> x <- rnorm(1)
> mpi.bcast.cmd(x <- rnorm(1))
>
> # Gather the entire x vector (by default to process 0, the master)
> mpi.bcast.cmd(mpi.gather.Robj(x))
> y <- mpi.gather.Robj(x)
> print(unlist(y))
[1] -2.6498050  0.5241441 -0.6747354  0.5915066  0.7660781  0.3608518 -2.7048508
[8] -0.4686277  0.5241441
>
> # Sum the x vector together, storing the result on process 0 by default
> mpi.bcast.cmd(mpi.reduce(x, op = "sum"))
> z <- mpi.reduce(x, op = "sum")
> print(z)
[1] -3.731294
>
> mpi.close.Rslaves(dellog = FALSE)
[1] 1
> mpi.exit()
[1] "Detaching Rmpi. Rmpi cannot be used unless relaunching R."
>
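The gather and reduce calls above are low-level building blocks; Rmpi also provides higher-level apply-style functions that divide work over the spawned slaves. Below is a minimal sketch, not taken from the run above, using mpi.parSapply; the toy squaring computation is made up for illustration.

library(Rmpi)

mpi.spawn.Rslaves(needlog = FALSE)

# Divide a simple elementwise computation among the spawned slaves
squares <- mpi.parSapply(1:20, function(i) i^2)
print(squares)

mpi.close.Rslaves(dellog = FALSE)
mpi.exit()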