
I am trying to parallelize the first for loop with OpenMP. Each array is initialized with random numbers. I tried separating every individual array initialization into its own section, but when I print omp_get_thread_num() it shows that only thread 0 is being used. Should I just create an omp for loop to initialize the arrays instead of breaking them into sections?

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#define ARRAY_SIZE 10

double randfrom(double min, double max);

double randfrom(double min, double max)
{
    double range = (max - min);
    double div = RAND_MAX / range;
    return min + (rand() / div);
}

int main() {
    int i;
    double a[ARRAY_SIZE], b[ARRAY_SIZE], c[ARRAY_SIZE], d[ARRAY_SIZE], e[ARRAY_SIZE], f[ARRAY_SIZE], y[ARRAY_SIZE];
    double min, max;
    int imin, imax;

    /*A[10] consists of random number in between 1 and 100
    B[10] consists of random number in between 10 and 50
    C[10] consists of random number in between 1 and 10
    D[10] consists of random number in between 1 and 50
    E[10] consists of random number in between 1 and 5
    F[10] consists of random number in between 10 and 80*/

    srand(time(NULL));
    
#pragma omp parallel
    {
#pragma omp sections
        {
#pragma omp section
            {
#pragma omp parallel for
                for (i = 0; i < ARRAY_SIZE; i++) {
                    a[i] = randfrom(1, 100);
                    printf_s("Hello from thread %d\n", omp_get_thread_num());
                }
            }

#pragma omp section
            {
#pragma omp parallel for 
                for (i = 0; i < ARRAY_SIZE; i++) {
                    b[i] = randfrom(10, 50);
                    printf_s("Hello from thread %d\n", omp_get_thread_num());
                }
            }

#pragma omp section
            {
#pragma omp parallel for 
                for (i = 0; i < ARRAY_SIZE; i++) {
                    c[i] = randfrom(1, 10);
                    
                }
                printf_s("Hello from thread %d\n", omp_get_thread_num());
            }

#pragma omp section
            {
#pragma omp parallel for 
                for (i = 0; i < ARRAY_SIZE; i++) {
                    d[i] = randfrom(1, 50);
                    printf_s("Hello from thread %d\n", omp_get_thread_num());

                }
            }


#pragma omp section
            {
#pragma omp parallel for 
                for (i = 0; i < ARRAY_SIZE; i++) {
                    e[i] = randfrom(1, 5);
                }
            }

#pragma omp section
            {
#pragma omp parallel for 
                for (i = 0; i < ARRAY_SIZE; i++) {
                    f[i] = randfrom(10, 80);
                }
            }

        }
    }

    printf("This is the parallel Print\n\n\n");

#pragma omp parallel shared(a,b,c,d,e,f,y) private(i)
    {
        //Y=(A*B)+C+(D*E)+(F/2)
#pragma omp for schedule(dynamic) nowait
        for (i = 0; i < ARRAY_SIZE; i++) {
            /*printf("A[%d]%.2f",i, a[i]);
            printf("\n\n");
            printf("B[%d]%.2f", i, b[i]);
            printf("\n\n");
            printf("C[%d]%.2f", i, c[i]);
            printf("\n\n");
            printf("D[%d]%.2f", i, d[i]);
            printf("\n\n");
            printf("E[%d]%.2f", i, e[i]);
            printf("\n\n");
            printf("F[%d]%.2f", i, f[i]);
            printf("\n\n");*/
            y[i] = (a[i] * b[i]) + c[i] + (d[i] * e[i]) + (f[i] / 2);
            printf("Y[%d]=%.2f\n", i, y[i]);
        }
    }

#pragma omp parallel shared(y, min,imin,max,imax) private(i)
    {
        //min
#pragma omp for schedule(dynamic) nowait
        for (i = 0; i < ARRAY_SIZE; i++) {
            if (i == 0) {
                min = y[i];
                imin = i;
            }
            else {
                if (y[i] < min) {
                    min = y[i];
                    imin = i;
                }
            }
        }

        //max
#pragma omp for schedule(dynamic) nowait
        for (i = 0; i < ARRAY_SIZE; i++) {
            if (i == 0) {
                max = y[i];
                imax = i;
            }
            else {
                if (y[i] > max) {
                    max = y[i];
                    imax = i;
                }
            }
        }
    }
        printf("min y[%d] = %.2f\nmax y[%d] = %.2f\n", imin, min, imax, max);
    return 0;
}

Ibrahim
  • *Should I just create an omp for loop to initialize the arrays instead of breaking them into sections?* Yes. – High Performance Mark Apr 14 '22 at 14:11
  • Sorry to add onto the question, but do we have to add a critical section in the second for loop, i.e. enclose `y[i] = (a[i] * b[i]) + c[i] + (d[i] * e[i]) + (f[i] / 2);` in a critical section? – Ibrahim Apr 14 '22 at 14:47
  • Does this answer your question? [How to generate random numbers in parallel?](https://stackoverflow.com/questions/4287531/how-to-generate-random-numbers-in-parallel) – paleonix Apr 14 '22 at 14:55
  • While nested parallelism is possible it is disabled by default. Start by either doing `parallel for` or `parallel sections`. Why should you need a critical section there? Just don't put any print statements in parallel sections. – paleonix Apr 14 '22 at 14:58
  • In the real world you should only use multi-threading when you have a big `ARRAY_SIZE` which provides enough parallelism. No need for `sections` here. – paleonix Apr 14 '22 at 15:01
  • What is your goal? If you need a fast program, just forget parallelization as the OpenMP overheads are significantly bigger than the workload in your case. If you wish to learn OpenMP, I will post an answer and correct the errors in your OpenMP code (some already mentioned, but data races are not). – Laci Apr 14 '22 at 19:30
  • I want to learn OpenMP, so a code correction would be greatly appreciated. It'll help me understand the parallelization better. – Ibrahim Apr 15 '22 at 04:06
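
Following up on the comments above, here is a minimal sketch of the single parallel for initialization they suggest, not a definitive fix for the posted code. It keeps the same ranges as the question, but replaces `rand()` with `rand_r()` (POSIX; not available with MSVC, where `rand_s` or a small hand-rolled generator would be needed instead) so each thread owns its own seed, per the linked question about generating random numbers in parallel. The helper name `randfrom_r` is just illustrative.

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ARRAY_SIZE 10

/* Thread-safe variant of randfrom(): the caller passes its own seed,
   so no state is shared between threads. */
static double randfrom_r(unsigned int *seed, double min, double max)
{
    return min + (max - min) * ((double)rand_r(seed) / RAND_MAX);
}

int main(void)
{
    double a[ARRAY_SIZE], b[ARRAY_SIZE], c[ARRAY_SIZE];
    double d[ARRAY_SIZE], e[ARRAY_SIZE], f[ARRAY_SIZE];

#pragma omp parallel
    {
        /* Per-thread seed derived from the time and the thread id. */
        unsigned int seed = (unsigned int)time(NULL) ^ (unsigned int)omp_get_thread_num();

        /* One worksharing loop fills the i-th slot of every array,
           so there is no need for sections or nested parallelism. */
#pragma omp for
        for (int i = 0; i < ARRAY_SIZE; i++) {
            a[i] = randfrom_r(&seed, 1, 100);
            b[i] = randfrom_r(&seed, 10, 50);
            c[i] = randfrom_r(&seed, 1, 10);
            d[i] = randfrom_r(&seed, 1, 50);
            e[i] = randfrom_r(&seed, 1, 5);
            f[i] = randfrom_r(&seed, 10, 80);
        }
    }

    /* Quick check that every array was filled. */
    for (int i = 0; i < ARRAY_SIZE; i++)
        printf("%d: %.2f %.2f %.2f %.2f %.2f %.2f\n",
               i, a[i], b[i], c[i], d[i], e[i], f[i]);
    return 0;
}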
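
On Laci's point about data races: in the posted min/max loops every thread writes the shared `min`/`imin` (and `max`/`imax`) unprotected, and the `i == 0` initialization only runs on whichever thread happens to get iteration 0. One race-free pattern, sketched here as a fragment that would slot into the posted main() (it reuses the `y`, `min`, `imin`, and `ARRAY_SIZE` declarations from the question), is to search thread-locally and merge each thread's candidate once inside a critical section; the same shape works for the maximum.

    /* Race-free minimum search over y[] together with its index. */
    min = y[0];
    imin = 0;

#pragma omp parallel
    {
        /* Each thread tracks its own best candidate... */
        double local_min = y[0];
        int local_imin = 0;

#pragma omp for nowait
        for (int i = 1; i < ARRAY_SIZE; i++) {
            if (y[i] < local_min) {
                local_min = y[i];
                local_imin = i;
            }
        }

        /* ...and merges it exactly once, so the critical section stays cheap. */
#pragma omp critical
        {
            if (local_min < min) {
                min = local_min;
                imin = local_imin;
            }
        }
    }
    printf("min y[%d] = %.2f\n", imin, min);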

0 Answers