[araim1@maya-usr1 ~]$ wget <paste_the_link_here>
[araim1@maya-usr1 ~]$ wget http://www.umbc.edu/hpcf/code/hello_serial/hello_serial.c --16:08:24-- http://www.umbc.edu/hpcf/code/hello_serial/hello_serial.c Resolving www.umbc.edu... 130.85.12.11 Connecting to www.umbc.edu|130.85.12.11|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 183 [text/plain] Saving to: `hello_serial.c' 100%[======================================================================================>] 183 --.-K/s in 0s 16:08:24 (29.1 MB/s) - `hello_serial.c' saved [183/183] [araim1@maya-usr1 ~]$ ls hello_serial.c [araim1@maya-usr1 ~]$
#include <stdio.h>
#include <unistd.h>

/*
 * Serial "hello world": prints a greeting with the name of the host the
 * program is running on. Returns 0 on success.
 */
int main(int argc, char* argv[]) {
    char hostname[256];

    /* POSIX does not guarantee that gethostname() NUL-terminates the buffer
       when the host name is truncated, so terminate explicitly before
       printing. Use sizeof() so the size stays in sync with the buffer. */
    gethostname(hostname, sizeof(hostname));
    hostname[sizeof(hostname) - 1] = '\0';

    printf("Hello world from %s\n", hostname);
    return 0;
}
[hu6@maya-usr1 hello_serial]$ icc hello_serial.c -o hello_serial [hu6@maya-usr1 hello_serial]$
[hu6@maya-usr1 hello_serial]$ ls hello_serial hello_serial.c
#include <stdio.h> #include <mpi.h> int main (int argc, char *argv[]) { int id, np; char processor_name[MPI_MAX_PROCESSOR_NAME]; int processor_name_len; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &np); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Get_processor_name(processor_name, &processor_name_len); printf("Hello world from process %03d out of %03d, processor name %s\n", id, np, processor_name); MPI_Finalize(); return 0; }
[hu6@maya-usr1 hello_parallel]$ mpiicc hello_parallel.c -o hello_parallel
[hu6@maya-usr1 hello_parallel]$ ls hello_parallel hello_parallel.c [hu6@maya-usr1 hello_parallel]$
[jongraf1@maya-usr1 ~]$ module list Currently Loaded Modulefiles: 1) dot 7) intel-mpi/64/4.1.3/049 2) matlab/r2014a 8) texlive/2014 3) comsol/4.4 9) quoter 4) gcc/4.8.2 10) git/2.0.4 5) slurm/14.03.6 11) default-environment 6) intel/compiler/64/15.0/2015.1.133
[hu6@maya-usr1 ~]$ mpiicc -show icc -I/cm/shared/apps/intel/mpi/4.1.3.049/intel64/include -L/cm/shared/apps/intel/mpi/4.1.3.049/intel64/lib -Xlinker --enable-new-dtags -Xlinker -rpath -Xlinker /cm/shared/apps/intel/mpi/4.1.3.049/intel64/lib -Xlinker -rpath -Xlinker /opt/intel/mpi-rt/4.1 -lmpigf -lmpi -lmpigi -ldl -lrt -lpthread
#include <stdio.h> #include <mpi.h> #include <string.h> int main(int argc, char* argv[]) { int id, np, processor_name_len; int j; int dest; int tag = 0; char processor_name[MPI_MAX_PROCESSOR_NAME]; char message[100]; MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Comm_size(MPI_COMM_WORLD, &np); MPI_Get_processor_name(processor_name, &processor_name_len); sprintf(message, "Process %03d out of %03d running on processor %4s", id, np, processor_name); if (id == 0) { printf("%s\n", message); for (j = 1; j < np; j++) { MPI_Recv(message, 100, MPI_CHAR, j, tag, MPI_COMM_WORLD, &status); printf("%s\n", message); } } else { dest = 0; MPI_Send(message, strlen(message)+1, MPI_CHAR, dest, tag, MPI_COMM_WORLD); } MPI_Finalize(); return 0; }
#include <stdio.h> #include <mpi.h> #include "nodes_used.h" int main(int argc, char* argv[]) { int id, np, processor_name_len; char processor_name[MPI_MAX_PROCESSOR_NAME]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Comm_size(MPI_COMM_WORLD, &np); MPI_Get_processor_name(processor_name, &processor_name_len); FILE* log_fp = NULL; if (id == 0) { log_fp = fopen("nodes_used.log", "w"); } /* Log our MPI processes to the log file. We could also have specified the special FILE* names "stdout" or "stderr" here */ log_processes(log_fp, id, np, processor_name); if (id == 0) { fclose(log_fp); } MPI_Finalize(); return 0; }
#ifndef NODES_USED_H
#define NODES_USED_H

#include <stdio.h>
#include <string.h>
#include <mpi.h>

/* Gather a one-line status message from every MPI rank onto rank 0 and
 * write one line per rank to fp. Collective over MPI_COMM_WORLD: every
 * rank must call it. fp is only dereferenced on rank 0, so other ranks
 * may pass NULL; fp may be a file opened by rank 0, or stdout/stderr. */
void log_processes(FILE* fp, int id, int np, char* processor_name);

#endif
#include "nodes_used.h"

/*
 * Collect a status line from every MPI rank onto rank 0 and write each
 * line to fp. Rank 0 writes its own line, then receives one message from
 * each rank 1..np-1 in rank order; every other rank sends its line to
 * rank 0. Collective: all ranks in MPI_COMM_WORLD must call this.
 * fp is only dereferenced on rank 0.
 */
void log_processes(FILE* fp, int id, int np, char* processor_name) {
    int j, dest;
    char message[100];
    int tag = 0;
    MPI_Status status;

    /* snprintf, not sprintf: "%4s" is only a MINIMUM field width, so a
       processor name up to MPI_MAX_PROCESSOR_NAME chars could overflow
       the 100-byte buffer. snprintf truncates safely and NUL-terminates. */
    snprintf(message, sizeof(message),
             "Process %04d out of %04d running on processor %4s",
             id, np, processor_name);

    if (id == 0) {
        fprintf(fp, "%s\n", message);
        /* Receive from each rank in order so the log is deterministic. */
        for (j = 1; j < np; j++) {
            MPI_Recv(message, 100, MPI_CHAR, j, tag, MPI_COMM_WORLD, &status);
            fprintf(fp, "%s\n", message);
        }
    } else {
        dest = 0;
        /* +1 so the terminating NUL travels with the string. */
        MPI_Send(message, strlen(message)+1, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
    }
}
# Build the hello_send_recv MPI example with the Intel MPI compiler wrapper.
OBJS       := nodes_used.o hello_send_recv.o
EXECUTABLE := hello_send_recv
DEFS       :=
CFLAGS     := -g -O3
INCLUDES   :=
LDFLAGS    := -lm
CC         := mpiicc

# NOTE: this pattern rule only fires for sources that have a matching
# header (e.g. nodes_used.c + nodes_used.h). Sources without a header
# (hello_send_recv.c) fall back to make's built-in %.o: %.c rule, which
# also compiles with $(CC) and $(CFLAGS). $(DEFS) is applied here so
# preprocessor definitions declared above actually take effect.
%.o: %.c %.h
	$(CC) $(CFLAGS) $(DEFS) $(INCLUDES) -c $< -o $@

# Link step; first real target, so a bare `make` builds the executable.
$(EXECUTABLE): $(OBJS)
	$(CC) $(CFLAGS) $(INCLUDES) $(OBJS) -o $@ $(LDFLAGS)

# `clean` is not a file: mark it phony so it still runs even if a file
# named "clean" ever appears in the directory.
.PHONY: clean
clean:
	rm -f *.o $(EXECUTABLE)
[hu6@maya-usr1 hello_send_recv-2]$ make mpiicc -g -O3 -c nodes_used.c -o nodes_used.o mpiicc -g -O3 -c -o hello_send_recv.o hello_send_recv.c mpiicc -g -O3 nodes_used.o hello_send_recv.o -o hello_send_recv -lm [hu6@maya-usr1 hello_send_recv-2]$ ls hello_send_recv hello_send_recv.o nodes_used.c nodes_used.o hello_send_recv.c Makefile nodes_used.h [hu6@maya-usr1 hello_send_recv-2]$
[hu6@maya-usr1 hello_send_recv-2]$ make clean rm -f *.o hello_send_recv [hu6@maya-usr1 hello_send_recv-2]$ ls hello_send_recv.c Makefile nodes_used.c nodes_used.h [hu6@maya-usr1 hello_send_recv-2]$