MAP Lab Manual
AND ENGINEERING
Name : -------------------------------------------------
Register No : -----------------------------------------
PROGRAM:
#include <stdio.h>
#include <omp.h>

/*
 * Demonstrates OpenMP thread creation: prints the thread count before,
 * inside, and after a parallel region, plus each thread's id.
 */
int main(void)
{
    /* Outside a parallel region only the master thread exists, so this prints 1. */
    printf("Before: total thread number is %d\n", omp_get_num_threads());

    /* fixed: "#pragmaomp" is not a valid directive; it must be "#pragma omp". */
    #pragma omp parallel
    {
        printf("Thread id is %d\n", omp_get_thread_num());
    }

    /* Back to a single thread after the parallel region, so this prints 1 again. */
    printf("After: total thread number is %d\n", omp_get_num_threads());
    return 0;
}
OUTPUT:
AIM:
PROGRAM:
#include <stdio.h>
#include <omp.h>

/*
 * Computes the matrix-vector product c = A*b for a 2x2 matrix in parallel
 * and prints the result vector.
 */
int main(void)
{
    /* NOTE(review): the declarations of A and b were lost in the OCR'd
       listing; reconstructed with sample values — confirm against the
       original manual. */
    float A[2][2] = { {1.0f, 2.0f}, {3.0f, 4.0f} };
    float b[2]    = { 1.0f, 1.0f };
    float c[2];
    int i, j;

    /* computes A*b — fixed the OCR merge "#pragmaomp parallel forfor";
       j must be private, otherwise threads race on the shared index. */
    #pragma omp parallel for private(j)
    for (i = 0; i < 2; i++) {
        c[i] = 0;
        for (j = 0; j < 2; j++) {
            c[i] = c[i] + A[i][j] * b[j];
        }
    }

    /* prints result */
    for (i = 0; i < 2; i++) {
        printf("c[%i]=%f \n", i, c[i]);
    }
    return 0;
}
OUTPUT:
Ex.no:3 Create a program that computes the sum of all the elements in
an array A (C/C++) or a program that finds the largest number
Date: in an array A. Use OpenMP directives to make it run in parallel
AIM:
To create a program that computes the sum of all the elements in an array.
ALGORITHM:
Step 1: Start
Step 2: Creation of a program for computing the sum of all the elements in an array.
Step 3: Input the array elements.
Step 4: Process of addition.
Step 5: Print the resultant sum.
Step 6: Stop.
PROGRAM:
#include <omp.h>
#include <bits/stdc++.h>
using namespace std;

/*
 * Sums the elements of an array by repeated pairwise combination on a queue.
 * Each thread, one at a time, halves the queue by popping two values and
 * pushing their sum; after enough passes a single element (the total) remains.
 */
int main()                                    // fixed: "Int main" is not valid C++
{
    vector<int> arr{3, 1, 2, 5, 4, 0};        // removed stray quote left by OCR
    queue<int> data;

    // Sequential reference sum, kept for comparison with the parallel result.
    int arr_sum = accumulate(arr.begin(), arr.end(), 0);
    int arr_size = arr.size();
    int new_data_size, x, y;

    for (int i = 0; i < arr_size; i++) {
        data.push(arr[i]);
    }

    // fixed: ceil(arr_size/2) divides two ints first, so the ceil was a
    // no-op for odd sizes; use integer ceiling division instead.
    omp_set_num_threads((arr_size + 1) / 2);

    #pragma omp parallel                      // fixed: "#pragmaomp"
    {
        // std::queue is not thread-safe, so every queue operation stays
        // inside one critical section; threads take turns halving the queue.
        #pragma omp critical
        {
            new_data_size = data.size();
            for (int j = 1; j < new_data_size; j = j * 2) {
                x = data.front(); data.pop();
                y = data.front(); data.pop();
                data.push(x + y);
            }
        }
    }

    // NOTE(review): assumes the requested thread count was granted so the
    // reduction fully collapses the queue; arr_sum is the sequential check.
    cout << "Sum: " << data.front() << " (sequential check: " << arr_sum << ")\n";
    return 0;                                 // fixed: "return0"
}
OUTPUT:
Array of elements: 1 5 7 9 11
Sum: 33
Ex.no:4 Write a simple Program demonstrating Message-Passing logic
using OpenMP
Date:
Result:
Thus the program has been executed successfully.
AIM:
To write a simple program demonstrating Message-Passing logic using OpenMP.
ALGORITHM:
Step 1: Start
Step 2: Creation of simple program demonstrating message passing logic.
Step 3: The message creation for transformation across web.
Step 4: Input the message.
Step 5: Process and print the result.
Step 6: Stop
PROGRAM:
#include <omp.h>
OUTPUT:
Hello World
AIM:
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* NOTE(review): N and the distance matrix declaration were lost in the
   OCR'd listing; reconstructed — confirm the size against the original. */
#define N 1000

static int distance_matrix[N][N];

/* Define minimum function that will be used later on to calculate minimum
   values between two numbers. */
#ifndef min
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif

/*
 * Floyd-Warshall all-pairs shortest paths: times a sequential baseline,
 * then repeats the computation with 1..10 OpenMP threads and reports each.
 * NOTE(review): the main signature was lost in the OCR'd listing.
 */
int main(void)
{
    int nthreads;
    int src, dst, middle;

    /* Initialize the graph with random distances between 0 and 19;
       distance from a node to itself stays 0, so those entries are skipped. */
    for (src = 0; src < N; src++) {
        for (dst = 0; dst < N; dst++) {
            if (src != dst) {
                distance_matrix[src][dst] = rand() % 20;
            }
        }
    }

    /* Record start time for the sequential execution. */
    double start_time = omp_get_wtime();
    for (middle = 0; middle < N; middle++) {
        int *dm = distance_matrix[middle];
        for (src = 0; src < N; src++) {
            int *ds = distance_matrix[src];
            for (dst = 0; dst < N; dst++) {
                ds[dst] = min(ds[dst], ds[middle] + dm[dst]);
            }
        }
    }
    double time = omp_get_wtime() - start_time;
    printf("Total time for sequential (in sec):%.2f\n", time);

    for (nthreads = 1; nthreads <= 10; nthreads++) {
        /* Define different number of threads. */
        omp_set_num_threads(nthreads);

        /* fixed: restart the timer for each thread count — the original kept
           measuring from the sequential start, inflating every reading. */
        start_time = omp_get_wtime();

        /* Taking a node as mediator, check if the indirect distance between
           source and destination via the mediator is less than the direct
           distance between them. The middle loop carries dependencies, so
           fixed: only the src loop is parallelized (the original also opened
           a parallel region over middle, which is incorrect for this
           algorithm). */
        for (middle = 0; middle < N; middle++) {
            int *dm = distance_matrix[middle];
            #pragma omp parallel for private(dst) shared(distance_matrix) schedule(dynamic)
            for (src = 0; src < N; src++) {
                int *ds = distance_matrix[src];
                for (dst = 0; dst < N; dst++) {
                    ds[dst] = min(ds[dst], ds[middle] + dm[dst]);
                }
            }
        }

        double elapsed = omp_get_wtime() - start_time;
        printf("Total time for thread %d (in sec):%.2f\n", nthreads, elapsed);
    }
    return 0;
}
OUTPUT
AIM:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Monte Carlo estimation of PI: sample random points in the unit square and
 * count how many fall inside the quarter unit circle; PI ~ 4 * inside/total.
 * NOTE(review): the sampling loop and counter declarations were lost in the
 * OCR'd listing; reconstructed — confirm against the original manual.
 */
int main(void)
{
    int i = 0;
    int pCircle = 0;         /* points inside the unit circle */
    int pSquare = 0;         /* total points sampled (inside the square) */
    const int SAMPLES = 1000000;

    srand(time(NULL));

    for (i = 0; i < SAMPLES; i++) {
        /* Random point in [0,1) x [0,1). */
        double randX = (double)rand() / RAND_MAX;
        double randY = (double)rand() / RAND_MAX;
        double d = randX * randX + randY * randY;

        /* If d is less than or equal to 1 the point lies inside the circle:
           increment pCircle by 1. */
        if (d <= 1) {
            pCircle++;
        }

        /* Increment pSquare by 1 for every sample. */
        pSquare++;
    }

    /* Stores the estimated value of PI. */
    double pi = 4.0 * ((double)pCircle / (double)(pSquare));
    printf("Estimated value of PI = %f\n", pi);
    return 0;
}
Result:
AIM:
To write a program to demonstrate MPI-broadcast-and-collective communication in C.
ALGORITHM:
Step 1: Start
Step 2: Get the values for broadcasting.
Step 3: Process using MPI-broadcast-and-collective communication
Step 4: Print the output
Step 5: Stop
PROGRAM:
#include <mpi.h>
#include <stdio.h>

/* NOTE(review): the body of this broadcast example (MPI_Init, rank query,
   MPI_Bcast of the data value) was lost in the OCR'd listing; only the
   shutdown tail survives. */
    MPI_Finalize();
    return 0;    /* fixed: "return0" */
}
OUTPUT:
>>> ./run.py my_bcast
mpirun -n 2 ./my_bcast
Process 0 broadcasting data 100
Process 1 received data 100 from root
Result:
Thus the program has been executed successfully
Ex.no:8 Write a Program to demonstrate MPI-scatter-gather-and-all
gather in C.
Date:
AIM:
To write a program to demonstrate MPI-scatter-gather-and-all gather.
ALGORITHM:
Step 1: Start
Step 2: Get an array of random numbers as input.
Step 3: Compute the average of array of numbers.
Step 4: Process and print the result.
Step 5: Stop
PROGRAM:
#include <stdio.h> #include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <assert.h>
// Creates an array of random numbers. Each number has a value from 0 - 1.
// The caller owns the returned buffer and must free() it.
float *create_rand_nums(int num_elements) {
    // fixed: don't cast malloc, and size off the pointee (sizeof *ptr)
    // rather than repeating the type.
    float *rand_nums = malloc(sizeof *rand_nums * num_elements);
    assert(rand_nums != NULL);
    for (int i = 0; i < num_elements; i++) {
        rand_nums[i] = (rand() / (float)RAND_MAX);
    }
    return rand_nums;
}
// Seed the random number generator to get different results each time srand(time(NULL));
&world_rank);int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
// Scatter the random numbers from the root process to all processes in
// the MPI world
MPI_Scatter(rand_nums, num_elements_per_proc, MPI_FLOAT, sub_rand_nums,
num_elements_per_proc, MPI_FLOAT, 0, MPI_COMM_WORLD);
// Gather all partial averages down to all the processes float *sub_avgs
= (float *)malloc(sizeof(float) * world_size);assert(sub_avgs != NULL);
MPI_Allgather(&sub_avg, 1, MPI_FLOAT, sub_avgs, 1, MPI_FLOAT,
MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
}
OUTPUT:
Result:
Date:
AIM:
Step 5: Stop
PROGRAM:
if (rank == 0)
{
    array = malloc(10 * sizeof(int));    /* Array of 10 elements */
    if (!array)                          /* error checking */
    {
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    /* fixed: array is an int*, so pass it directly; the original's &array
       sent the address of the pointer variable, not the 10 ints. */
    MPI_Send(array, 10, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
if (rank == 1) {
    /* NOTE(review): rank 1 must also allocate array before receiving into
       it — the visible fragment only allocates on rank 0; confirm against
       the code elided above. Same &array -> array fix as the send. */
    MPI_Recv(array, 10, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
    /* more code here */
}
MPI_Finalize();
OUTPUT:
>>> ./run.py send_recv
mpirun -n 2 ./send_recv
Process 1 received number -1 from process 0
Result:
Thus the program has been executed successfully.
Step 5: Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "tmpi_rank.h"
#include <time.h>

/*
 * Demonstrates parallel rank: each process draws a random number and the
 * TMPI_Rank helper (from tmpi_rank.h) determines its order among processes.
 */
int main(int argc, char **argv) {
    MPI_Init(NULL, NULL);

    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Seed the random number generator to get different results each time
    // on every process.
    srand(time(NULL) * world_rank);

    /* NOTE(review): the calls that draw the random number and invoke
       TMPI_Rank were lost in the OCR'd listing; confirm against the
       original manual. */

    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
    return 0;
}
OUTPUT:
>>> ./run.py random_rank
mpirun -n 4 ./random_rank 100
Rank for 0.242578 on process 0 – 0
Rank for 0.894732 on process 1 – 3
Rank for 0.789463 on process 2 – 2
Rank for 0.684195 on process 3 – 1
Result:
Thus the program has been executed successfully.