Assignment 4

Monte Carlo using MPI


ECGR 5145 – Parallel Computing

Sudarshan Suresh, Graduate Student

UNC Charlotte, 800813703

Assignment-4

Due On: 10/10/2014

Task 1
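Both the sequential and the MPI programs use the same area argument: a point (x, y) drawn uniformly from the unit square falls inside the quarter circle x^2 + y^2 <= 1 with probability (pi/4)/1, the ratio of the two areas. Counting hits therefore gives

pi ≈ 4 * (points inside the quarter circle) / (total points sampled)

which is exactly the expression (count/samples)*4.0 evaluated after each sampling loop.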

a. Sequential C program for Monte Carlo calculation

#include<stdio.h>

#include<stdlib.h>

#include<math.h>

#include<time.h>

#include<sys/time.h>

int main(int argc, char* argv[])

{

double samples,N,S;

double sum1,sum2;

int i=0;

float elapsedtime;

int count=0;

double total;

double pi;

printf("Enter the value of N and S used to estimate pi\n");

scanf("%lf %lf",&N,&S);

samples=N*S;

count=0;

struct timeval start, end;

srand(time(NULL));


gettimeofday(&start, NULL);
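
/* Each iteration draws one (x, y) point uniformly in the unit square and counts how many points fall inside the quarter circle of radius 1. */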

while(i<samples)

{

sum1 = (double)rand()/RAND_MAX;

sum2 = (double)rand()/RAND_MAX;

total = sqrt((sum1*sum1)+(sum2*sum2));

i++;

if (total<=1)

{

++count;

}

}

gettimeofday(&end, NULL);

pi = ((double)count/(double)samples)*4.0;

float error= 3.14159265359-pi;

elapsedtime = (end.tv_sec - start.tv_sec) + ((end.tv_usec - start.tv_usec) / 1000000.0);

printf("\nPi: %f\n", pi);

printf("\nError: %f\n",error);

printf("Elapsed time: %f seconds\n",elapsedtime);

return 0;

}


a. Screenshot of the compilation of the sequential C program for the Monte Carlo estimation
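For reference, the sequential version can be compiled with the math library linked in (the file name monte_seq.c is an assumption for illustration, not taken from the screenshot):

gcc -o monte_seq monte_seq.c -lm
./monte_seq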

Screenshot of the execution of the sequential C program for different values of N*S

For N*S=10000

For N*S=20000

For N*S=30000


Task 2

b. MPI Program for the Monte Carlo π estimation

#include <stdio.h>

#include <stdlib.h>

#include <mpi.h>

#include <time.h>

#include <sys/time.h>

#include <math.h>

int main(int argc, char* argv[])

{

int N = 1000, S =40;

int rank, P;

double sum1,sum2;

int i, count=0, s = 0;

double total, pi;

float elapsedtime;

struct timeval start, end;

int fs = 0;

int fc = 0;

int seeds = 0;

MPI_Init(&argc, &argv);

MPI_Comm_rank(MPI_COMM_WORLD, &rank);

MPI_Comm_size(MPI_COMM_WORLD, &P);

MPI_Status status;

int received[P], buffer = 0;

int recv[P];

srand(time(NULL));


gettimeofday(&start, NULL);
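
/* Rank 0 acts as the master: it first hands out one work item to every worker process. */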

if(rank == 0)

{

for (i = 1; i<P; i++)

{

MPI_Send(&buffer,1,MPI_INT,i,1, MPI_COMM_WORLD);

}

}
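
/* Workers: wait for a work item from the master, generate N random points, and send the hit count and the sample count back. */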

if(rank != 0)

{

MPI_Recv(&buffer,1,MPI_INT,0,MPI_ANY_TAG, MPI_COMM_WORLD,&status);

for (i=0; i<N; ++i)

{

sum1 = ((double)rand())/RAND_MAX;

sum2 =((double)rand())/RAND_MAX;

total = sqrt(sum1*sum1 + sum2*sum2);

if (total<=1)

{

count++;

}

}

MPI_Send(&count,1,MPI_INT,0,rank,MPI_COMM_WORLD);

MPI_Send(&N,1,MPI_INT,0,rank,MPI_COMM_WORLD);

}
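
/* Master: gather hit counts and sample counts from the workers, issuing another work item whenever fewer than S seeds have been handed out so far. */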

else if (rank==0)

{


for(i=1; i<P; ++i)

{

MPI_Recv(&received[i],1,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);

fc = fc + received[i];

received[i] = 0;

if(s < S)

{

MPI_Send(&buffer,1,MPI_INT,status.MPI_SOURCE,rank,MPI_COMM_WORLD);

s++;

}

MPI_Recv(&recv[i],1,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&status);

fs = fs + recv[i];

recv[i] = 0;

}

}
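
/* Rank 0 forms the final estimate from the aggregated counts and reports the error and elapsed time. */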

if (rank == 0)

{

pi = ((double)fc)/((double)fs)*4.0;

float error=3.14159265359-pi;

gettimeofday(&end, NULL);

elapsedtime = (end.tv_sec - start.tv_sec) + ((end.tv_usec - start.tv_usec) / 1000000.0);

printf("\nThe values of pi for N=1000 and S=30\n");

printf("\nPi: %f\n", pi);

printf("\n error: %f\n",error);

printf("Elapsed Time: %f\n",elapsedtime);

}


MPI_Finalize();

return 0;

}

c. Screenshot of the compilation of the MPI program for Monte Carlo π estimation
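For reference, a typical way to build and launch the MPI version (the file name monte_mpi.c and the process count are assumptions for illustration):

mpicc -o monte_mpi monte_mpi.c -lm
mpirun -np 4 ./monte_mpi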

Screenshot of the execution of the MPI program for Monte Carlo π estimation for different values of N*S

For N*S=10000

For N*S=20000


For N*S=30000

d. Discussion on random numbers

The Monte Carlo method works poorly if all the processes generate the same random number sequence:

1. If every process seeds its generator from the system clock, processes launched at the same instant start with the same seed and produce identical samples, so the extra processes add no new information.

2. Each process therefore needs to generate its random numbers locally and to ensure that its starting seed differs from that of every other process.

This can be achieved by passing srand() a seed that combines the current system time with the process rank, so the starting seed differs across processes even when they all start at the same moment, as the sketch below illustrates.
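A minimal sketch of rank-dependent seeding (the offset 1000 * rank is an illustrative choice, not taken from the submitted program):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

int main(int argc, char* argv[])
{
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Combining the wall-clock time with the rank keeps the seeds distinct
       even when every process is launched at the same instant. */
    srand((unsigned)time(NULL) + 1000 * rank);

    printf("Rank %d, first draw: %f\n", rank, (double)rand() / RAND_MAX);

    MPI_Finalize();
    return 0;
}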

Task 3 (Extra)

a. Screenshots of the compilation and execution of the MPI program on the UNCW Babbage cluster for different numbers of processes

For 4 processes


For 8 processes

For 12 processes

For 16 processes

Screenshot of the execution time for a sequential program

b. Speedup Graphs

Ts = 0.566000 seconds

Processes    Tp (seconds)    Speedup = Ts/Tp
4            0.405           1.39
8            1.724           0.32
12           3.521           0.16
16           7.571           0.07
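For example, with 4 processes the speedup is 0.566/0.405 ≈ 1.4, while for 8 or more processes Tp exceeds Ts, so the speedup falls below 1.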

Graph of Speedup vs Number of Processes

[Figure: "Speed up vs Number of Processes"; x-axis: Number of processes (0-20), y-axis: Speedup (0-1.6), series: Parallel Execution]

Graph of Execution Time vs Number of Processes

[Figure: "Execution time vs Number of Processes"; x-axis: Number of Processes (0-20), y-axis: Parallel Execution Time (0-8 seconds), series: Parallel Execution]


c. Conclusions

We can observe from the graphs and the output results that the execution time increases as the number of processes grows. This is because of the overhead incurred by the master in allocating the seeds to the slave processes, so the more processes are used, the longer it takes to estimate the value of pi. However, the estimated value of pi converges closer and closer to 3.14.