PDC Lab 2-5
Theory
A global sum is computed with parallel reduction: each thread accumulates a private partial sum, and the reduction clause combines the partial sums into a single result (a standalone example of this is shown after the listing below). Matrix-vector multiplication multiplies a matrix by a vector to produce another vector; it parallelizes row-wise, since each element of the result depends only on one row of the matrix.
Code:
#include <stdio.h>
#include <omp.h>
int main() {
    int mat[2][2] = {{1, 2}, {3, 4}}, vec[2] = {1, 1}, res[2] = {0};
    #pragma omp parallel for                    /* each row of the product is computed by one thread */
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++) res[i] += mat[i][j] * vec[j];
    printf("res = [%d %d]\n", res[0], res[1]);  /* expected: [3 7] */
    return 0;
}
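The global-sum part of the theory is easiest to see on a plain array. A minimal sketch, assuming an illustrative eight-element array (the data is not from the original lab):
#include <stdio.h>
#include <omp.h>
int main() {
    int a[8] = {1, 2, 3, 4, 5, 6, 7, 8}, sum = 0;   /* illustrative data */
    #pragma omp parallel for reduction(+:sum)       /* each thread sums part of the array; partial sums are combined */
    for (int i = 0; i < 8; i++) sum += a[i];
    printf("global sum = %d\n", sum);               /* prints 36 */
    return 0;
}
OpenMP programs such as these are compiled with the -fopenmp flag, e.g. gcc -fopenmp prog.c (the file name is a placeholder).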
Theory
The Monte Carlo method estimates π by generating random points (x, y) in the unit square and counting how many satisfy x*x + y*y <= 1, i.e. fall inside the quarter circle of radius 1. Since the quarter circle has area π/4, the fraction of points that land inside approaches π/4, so π ≈ 4 × (points inside / total points). MPI distributes the point generation among multiple processes and combines the per-process counts with a reduction on the root process.
Code:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char *argv[]) {
    int rank, size, inside = 0, n = 100000;            /* samples per process (illustrative count) */
    double x, y, pi;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    srand(rank + 1);                                   /* different seed on each process */
    for (int i = 0; i < n; i++) {
        x = (double)rand() / RAND_MAX;
        y = (double)rand() / RAND_MAX;
        if (x * x + y * y <= 1) inside++;              /* point lies inside the quarter circle */
    }
    int global_inside;
    MPI_Reduce(&inside, &global_inside, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        pi = 4.0 * global_inside / ((double)n * size); /* pi ~ 4 * inside / total points */
        printf("Estimated pi = %f\n", pi);
    }
    MPI_Finalize();
    return 0;
}
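This program is built and launched with the standard MPI wrappers, for example mpicc pi.c -o pi followed by mpirun -np 4 ./pi (the file name and process count here are placeholders).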
Theory
Sorting can be parallelized with OpenMP or MPI by dividing the data among threads or processes: each part is sorted independently and the sorted parts are then combined. The MPI version below distributes the array among the processes, sorts each piece locally, and combines the pieces on the root process; an OpenMP variant is sketched after the listing.
Code:
#include <stdio.h>
#include <mpi.h>
void sort(int *a, int n) {                           /* simple bubble sort */
    for (int i = 0; i < n - 1; i++)
        for (int j = 0; j < n - 1 - i; j++)
            if (a[j] > a[j + 1]) { int t = a[j]; a[j] = a[j + 1]; a[j + 1] = t; }
}
int main(int argc, char *argv[]) {
    int rank, size, a[8] = {7, 3, 8, 1, 6, 2, 5, 4}, local[4];   /* example input; run with 2 processes */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Scatter(a, 4, MPI_INT, local, 4, MPI_INT, 0, MPI_COMM_WORLD);
    sort(local, 4);                                  /* each process sorts its half */
    MPI_Gather(local, 4, MPI_INT, a, 4, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0) { sort(a, 8); for (int i = 0; i < 8; i++) printf("%d ", a[i]); }
    MPI_Finalize();
    return 0;
}
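The same divide-and-sort idea can be expressed with OpenMP threads instead of MPI processes. A minimal sketch, reusing the bubble sort above (the input array is illustrative, and the final call to sort combines the two sorted halves in the same simple way the MPI version does):
#include <stdio.h>
#include <omp.h>
void sort(int *a, int n) {                      /* same bubble sort as in the MPI version */
    for (int i = 0; i < n - 1; i++)
        for (int j = 0; j < n - 1 - i; j++)
            if (a[j] > a[j + 1]) { int t = a[j]; a[j] = a[j + 1]; a[j + 1] = t; }
}
int main() {
    int a[8] = {7, 3, 8, 1, 6, 2, 5, 4};        /* illustrative input */
    #pragma omp parallel sections               /* each half is sorted by a different thread */
    {
        #pragma omp section
        sort(a, 4);
        #pragma omp section
        sort(a + 4, 4);
    }
    sort(a, 8);                                 /* simple final pass to combine the halves */
    for (int i = 0; i < 8; i++) printf("%d ", a[i]);
    return 0;
}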
Theory
Bitonic sort is a comparison-network sorting algorithm for inputs whose length is a power of two. Within each merge stage the compare-exchange operations act on disjoint pairs of elements, so they can all run in parallel, which is why the algorithm works well with OpenMP and similar parallel programming models.
Code:
#include <stdio.h>
#include <omp.h>
#define N 8
void bitonicMerge(int a[], int lo, int cnt, int dir) {
    if (cnt > 1) { int k = cnt / 2;
        #pragma omp parallel for   /* compare-exchange disjoint pairs in parallel */
        for (int i = lo; i < lo + k; i++)
            if ((a[i] > a[i + k]) == dir) { int t = a[i]; a[i] = a[i + k]; a[i + k] = t; }
        bitonicMerge(a, lo, k, dir); bitonicMerge(a, lo + k, k, dir); } }
void bitonicSort(int a[], int lo, int cnt, int dir) {
    if (cnt > 1) { int k = cnt / 2;
        bitonicSort(a, lo, k, 1); bitonicSort(a, lo + k, k, 0);   /* ascending half, then descending half */
        bitonicMerge(a, lo, cnt, dir); } }
int main() {
    int a[N] = {7, 3, 8, 1, 6, 2, 5, 4};   /* example input, length a power of two */
    bitonicSort(a, 0, N, 1);               /* dir = 1 sorts ascending */
    for (int i = 0; i < N; i++) printf("%d ", a[i]);
    return 0; }