
EXERCISE- 4

Harshan.R
22IZ014
AIM:-
To study and analyze the use of MPI for parallel computing.

MPI:-
The Message Passing Interface (MPI) is a standardized means of exchanging messages between
multiple computers running a parallel program across distributed memory.
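
Every program in this exercise follows the same basic structure. As a minimal illustrative sketch (not part of the original exercise), an MPI program initializes the runtime, queries its own rank and the total process count, and finalizes before exiting:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
    int rank, size;
    MPI_Init(&argc, &argv);               // Start the MPI runtime
    MPI_Comm_rank(MPI_COMM_WORLD, &rank); // This process's ID (0 .. size-1)
    MPI_Comm_size(MPI_COMM_WORLD, &size); // Total number of processes
    printf("Hello from process %d of %d\n", rank, size);
    MPI_Finalize();                       // Shut down the MPI runtime
    return 0;
}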

POINT-TO-POINT COMMUNICATION IN MPI:-
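
In point-to-point communication, an explicit MPI_Send on one process is paired with a matching MPI_Recv on another, identified by rank and message tag. A minimal sketch (illustrative, not taken from the exercise programs below) passing one integer from rank 0 to rank 1:

int value;
if (rank == 0) {
    value = 42;
    // Send one int to rank 1 with message tag 0
    MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
} else if (rank == 1) {
    // Receive one int from rank 0, matching tag 0
    MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}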


1) MATRIX MULTIPLICATION:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N 4 // Matrix size (NxN)

// Function to print a matrix
void printMatrix(int matrix[N][N], const char *name) {
    printf("\n%s Matrix:\n", name);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            printf("%3d ", matrix[i][j]);
        printf("\n");
    }
}

// Function to initialize a matrix with random values
void initRandomMatrix(int matrix[N][N]) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            matrix[i][j] = rand() % 10; // Random values between 0-9
}

int main(int argc, char *argv[]) {
    int rank, size;
    int A[N][N], B[N][N], C[N][N] = {0}; // Matrices

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Calculate how many rows each process will handle
    int rows_per_proc = N / size;
    int remainder = N % size;

    // Determine starting and ending row for each process
    int start_row = rank * rows_per_proc + (rank < remainder ? rank : remainder);
    int end_row = start_row + rows_per_proc + (rank < remainder ? 1 : 0);
    int local_rows = end_row - start_row;

    // Allocate memory for local arrays
    int (*local_A)[N] = malloc(local_rows * sizeof(*local_A));
    int (*local_C)[N] = malloc(local_rows * sizeof(*local_C));

    // Initialize matrices on root process
    if (rank == 0) {
        srand(time(NULL));
        initRandomMatrix(A);
        initRandomMatrix(B);
        // Print input matrices
        printMatrix(A, "A");
        printMatrix(B, "B");
    }

    // Distribute matrix B to all processes
    MPI_Bcast(B, N * N, MPI_INT, 0, MPI_COMM_WORLD);

    // Distribute matrix A among processes
    if (rank == 0) {
        // Root keeps its portion
        for (int i = 0; i < local_rows; i++)
            for (int j = 0; j < N; j++)
                local_A[i][j] = A[i][j];
        // Send portions of A to other processes
        int current_row = local_rows;
        for (int dest = 1; dest < size; dest++) {
            int dest_rows = rows_per_proc + (dest < remainder ? 1 : 0);
            MPI_Send(&A[current_row][0], dest_rows * N, MPI_INT, dest, 0, MPI_COMM_WORLD);
            current_row += dest_rows;
        }
    } else {
        // Receive portion of A
        MPI_Recv(local_A, local_rows * N, MPI_INT, 0, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
    }

    // Perform local matrix multiplication
    for (int i = 0; i < local_rows; i++) {
        for (int j = 0; j < N; j++) {
            local_C[i][j] = 0;
            for (int k = 0; k < N; k++) {
                local_C[i][j] += local_A[i][k] * B[k][j];
            }
        }
    }

    // Gather results at the root process
    if (rank == 0) {
        // Copy local results into C
        for (int i = 0; i < local_rows; i++)
            for (int j = 0; j < N; j++)
                C[i][j] = local_C[i][j];
        // Receive results from other processes
        int current_row = local_rows;
        for (int src = 1; src < size; src++) {
            int src_rows = rows_per_proc + (src < remainder ? 1 : 0);
            MPI_Recv(&C[current_row][0], src_rows * N, MPI_INT, src, 1, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
            current_row += src_rows;
        }
        // Print the final result matrix
        printMatrix(C, "C (Result)");
    } else {
        // Send computed result back to root
        MPI_Send(local_C, local_rows * N, MPI_INT, 0, 1, MPI_COMM_WORLD);
    }

    // Free allocated memory
    free(local_A);
    free(local_C);
    MPI_Finalize();
    return 0;
}
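
The programs in this exercise can be built and launched with the standard MPI compiler wrapper and launcher; the source file name below is only a placeholder, and the exact commands may vary with the MPI distribution:

mpicc matrix_mult.c -o matrix_mult
mpirun -np 4 ./matrix_mult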

OUTPUT:-
2) FOR PI CALCULATION:
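The program estimates Pi by numerically integrating f(x) = 4 / (1 + x^2) over [0, 1] with the midpoint rule, since 4 * arctan(1) = Pi. In LaTeX notation:

\pi = \int_0^1 \frac{4}{1 + x^2}\, dx \approx h \sum_{i=0}^{n-1} \frac{4}{1 + \left((i + 0.5)h\right)^2}, \quad h = \frac{1}{n}

Each process evaluates a contiguous block of the n midpoint terms, and the partial sums are combined at rank 0.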
#include <mpi.h>
#include <stdio.h>

double f(double x) {
    return 4.0 / (1.0 + x * x); // Function to integrate
}

int main(int argc, char *argv[]) {
    int rank, size;
    double pi = 0.0, local_sum = 0.0;
    int n = 1000000;    // Number of intervals
    double h = 1.0 / n; // Step size

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Determine the workload for each process; the last rank also
    // picks up the leftover intervals when n is not divisible by size
    int start = rank * (n / size);
    int end = (rank == size - 1) ? n : (rank + 1) * (n / size);

    // Compute local sum using the midpoint of each interval
    for (int i = start; i < end; i++) {
        double x = (i + 0.5) * h;
        local_sum += f(x);
    }
    local_sum *= h; // Multiply by step size

    // Print local sum from each process
    printf("Process %d computed local sum: %.15f\n", rank, local_sum);

    // Reduce local sums to compute the final Pi value
    MPI_Reduce(&local_sum, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    // Print final result from rank 0
    if (rank == 0) {
        printf("Final Calculated Pi: %.15f\n", pi);
    }

    MPI_Finalize();
    return 0;
}
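
Note that MPI_Reduce is itself a collective operation. To keep this version purely point-to-point, the reduction could instead be written with explicit sends and receives; the following sketch (an assumption about how one might substitute for the MPI_Reduce call above, not part of the original program) accumulates the partial sums at rank 0 manually:

if (rank == 0) {
    pi = local_sum;
    double partial;
    for (int src = 1; src < size; src++) {
        // Collect each worker's partial sum and accumulate it
        MPI_Recv(&partial, 1, MPI_DOUBLE, src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        pi += partial;
    }
} else {
    // Workers send their partial sums to rank 0
    MPI_Send(&local_sum, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
}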
OUTPUT:-

BROADCAST COMMUNICATION IN MPI:-
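
In collective (broadcast-style) communication, a single call involves every process in the communicator: MPI_Bcast copies a buffer from one root rank to all others, MPI_Scatterv distributes unequal slices of a buffer, MPI_Gatherv reassembles them, and MPI_Reduce combines values with an operation such as MPI_SUM. A minimal illustrative sketch (not from the exercise programs below) broadcasting one integer from rank 0:

int data = 0;
if (rank == 0) data = 99; // Only the root sets the value
// After this call every rank's copy of data holds 99
MPI_Bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);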


1) MATRIX MULTIPLICATION:
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N 4 // Matrix size (NxN)

// Function to print a matrix
void printMatrix(int matrix[N][N], const char *name) {
    printf("\n%s Matrix:\n", name);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            printf("%3d ", matrix[i][j]);
        printf("\n");
    }
}

// Function to initialize a matrix with random values
void initRandomMatrix(int matrix[N][N]) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            matrix[i][j] = rand() % 10; // Random values between 0-9
}

int main(int argc, char *argv[]) {
    int rank, size;
    int A[N][N], B[N][N], C[N][N] = {0}; // Matrices
    int local_rows;
    // Scatter/gather parameters; only meaningful on the root,
    // so initialize to NULL on every rank
    int *sendcounts = NULL, *displs = NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Calculate how many rows each process will handle
    local_rows = N / size;
    if (rank < N % size) {
        local_rows++; // Extra row for the first few processes if N is not evenly divisible
    }

    // Allocate memory for local matrices
    int (*local_A)[N] = malloc(local_rows * sizeof(*local_A));
    int (*local_C)[N] = malloc(local_rows * sizeof(*local_C));

    // Initialize result matrix to zero
    for (int i = 0; i < local_rows; i++) {
        for (int j = 0; j < N; j++) {
            local_C[i][j] = 0;
        }
    }

    // Root process prepares scatter and gather parameters
    if (rank == 0) {
        sendcounts = malloc(size * sizeof(int));
        displs = malloc(size * sizeof(int));
        int disp = 0;
        for (int i = 0; i < size; i++) {
            int rows_for_proc = N / size;
            if (i < N % size) {
                rows_for_proc++; // Distribute remaining rows to the first few processes
            }
            sendcounts[i] = rows_for_proc * N;
            displs[i] = disp;
            disp += rows_for_proc * N;
        }
        // Initialize matrices with random values
        srand(time(NULL));
        initRandomMatrix(A);
        initRandomMatrix(B);
        // Print input matrices
        printMatrix(A, "A");
        printMatrix(B, "B");
    }

    // Distribute matrix A using MPI_Scatterv
    MPI_Scatterv(A, sendcounts, displs, MPI_INT, local_A, local_rows * N, MPI_INT, 0,
                 MPI_COMM_WORLD);

    // Broadcast matrix B to all processes
    MPI_Bcast(B, N * N, MPI_INT, 0, MPI_COMM_WORLD);

    // Compute matrix multiplication
    for (int i = 0; i < local_rows; i++) {
        for (int j = 0; j < N; j++) {
            for (int k = 0; k < N; k++) {
                local_C[i][j] += local_A[i][k] * B[k][j];
            }
        }
    }

    // Gather results back to process 0
    MPI_Gatherv(local_C, local_rows * N, MPI_INT, C, sendcounts, displs, MPI_INT, 0,
                MPI_COMM_WORLD);

    // Print the result
    if (rank == 0) {
        printMatrix(C, "C (Result)");
        free(sendcounts);
        free(displs);
    }

    // Free allocated memory
    free(local_A);
    free(local_C);
    MPI_Finalize();
    return 0;
}
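
MPI_Scatterv and MPI_Gatherv are used here instead of MPI_Scatter and MPI_Gather because row counts can differ between ranks when N is not divisible by size; sendcounts and displs hold each rank's element count and offset. To analyze this version against the point-to-point one, the compute phase can be timed with MPI_Wtime; a sketch under the assumption that it brackets the multiplication loop above:

double t_start = MPI_Wtime(); // Wall-clock time before the local compute
// ... local matrix multiplication loop ...
double t_elapsed = MPI_Wtime() - t_start;
printf("Process %d compute time: %f s\n", rank, t_elapsed);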
OUTPUT:-

2) FOR PI CALCULATION:
#include <mpi.h>
#include <stdio.h>

double f(double x) {
    return 4.0 / (1.0 + x * x); // Function to integrate
}

int main(int argc, char *argv[]) {
    int rank, size;
    double pi = 0.0, local_sum = 0.0;
    int n = 100000000;  // Number of intervals
    double h = 1.0 / n; // Step size

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Broadcast the number of intervals to all processes
    // (demonstrative here, since every rank already initialized n)
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    // Each process computes its local sum using a strided approach
    for (int i = rank; i < n; i += size) {
        double x = (i + 0.5) * h;
        local_sum += f(x);
    }
    local_sum *= h;

    // Print the local sum contribution of each process
    printf("Process %d computed local sum: %.15f\n", rank, local_sum);

    // Reduce local sums to compute the final Pi value at rank 0
    MPI_Reduce(&local_sum, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    // Rank 0 prints the final computed value of Pi
    if (rank == 0) {
        printf("Final Calculated Pi: %.15f\n", pi);
    }

    MPI_Finalize();
    return 0;
}
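
Unlike the block decomposition used in the point-to-point version, the strided loop (i = rank; i < n; i += size) assigns every size-th interval to each process, so the work stays balanced and no intervals are dropped when n is not divisible by size.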

OUTPUT:-
