mpirun

I am trying to run a code with MPI. I already have the Microsoft SDKs and an MPI compiler installed alongside MinGW-w64. I am trying to run the code using
mpiexec -np 2 project.exe
but the code still tells me that it must be run on two processors. Inside the code there is also a line saying
printf("In order to use this example, you must configure with the option\n--with-mpi-compilers=full_path_to_your_mpi_compilers and recompile.\n");

I don't understand how I should invoke the program from the command line so that it runs in parallel with MPI.

This is the code, if it helps:

#include <math.h>
#include <stdio.h>    // printf, fflush
#include <stdlib.h>   // exit, EXIT_SUCCESS
#include <iostream>
#include "ml_include.h"
#ifdef ML_MPI
#include <mpi.h>
extern int Poisson_getrow(ML_Operator* mat_in, int N_requested_rows, int requested_rows[],
    int allocated_space, int columns[], double values[], int row_lengths[]);
extern int Poisson_matvec(ML_Operator* mat_in, int in_length, double p[], int out_length,
    double ap[]);
extern int Poisson_comm(double x[], void* A_data);
extern int send_msg(char* send_buffer, int length, int neighbor);
extern int recv_msg(char* recv_buffer, int length, int neighbor, USR_REQ* request);
extern int post_msg(char* recv_buffer, int length, int neighbor, USR_REQ* request);
                                    
int main(int argc, char* argv[]) {
    ML* ml_object;
    int i, N_grids = 3, N_levels;
    double sol[5], rhs[5];
    ML_Aggregate* agg_object;
    int proc, nlocal, nlocal_allcolumns;
    MPI_Init(&argc, &argv);
    ML_Set_PrintLevel(15);
    for (i = 0; i < 5; i++) sol[i] = 0.;
    for (i = 0; i < 5; i++) rhs[i] = 2.;
    ML_Create(&ml_object, N_grids);
    proc = ml_object->comm->ML_mypid;
    std::cout << "num_procs" << proc << std::endl;
    if (ml_object->comm->ML_nprocs != 2) {
        if (proc == 0) printf("Must be run on two processors\n");
        ML_Destroy(&ml_object);
        MPI_Finalize();
        exit(1);
    }
    if (proc == 0) { nlocal = 2; nlocal_allcolumns = 4; }
    else if (proc == 1) { nlocal = 3; nlocal_allcolumns = 5; }
    else { nlocal = 0; nlocal_allcolumns = 0; }
    ML_Init_Amatrix(ml_object, 0, nlocal, nlocal, &proc);
    ML_Set_Amatrix_Getrow(ml_object, 0, Poisson_getrow, Poisson_comm,
        nlocal_allcolumns);
    ML_Set_Amatrix_Matvec(ml_object, 0, Poisson_matvec);
    ML_Aggregate_Create(&agg_object);
    ML_Aggregate_Set_MaxCoarseSize(agg_object, 1);
    N_levels = ML_Gen_MGHierarchy_UsingAggregation(ml_object, 0,
        ML_INCREASING, agg_object);
    ML_Gen_Smoother_Jacobi(ml_object, ML_ALL_LEVELS, ML_PRESMOOTHER, 1, ML_DEFAULT);
    ML_Gen_Solver(ml_object, ML_MGV, 0, N_levels - 1);
    ML_Iterate(ml_object, sol, rhs);
    if (proc == 0) {
        printf("sol(0) = %e\n", sol[1]);
        fflush(stdout);
    }
    ML_Comm_GsumInt(ml_object->comm, 1);  
    if (proc == 1) {
        printf("sol(1) = %e\n", sol[0]);
        printf("sol(2) = %e\n", sol[1]);
        printf("sol(3) = %e\n", sol[2]);
        fflush(stdout);
    }
    ML_Comm_GsumInt(ml_object->comm, 1);   
    if (proc == 0) {
        printf("sol(4) = %e\n", sol[0]);
        fflush(stdout);
    }
    ML_Aggregate_Destroy(&agg_object);
    ML_Destroy(&ml_object);
    MPI_Finalize();
    return 0;
}

int Poisson_getrow(ML_Operator* mat_in, int N_requested_rows, int requested_rows[],
    int allocated_space, int cols[], double values[], int row_lengths[])
{
    int m = 0, i, row, proc, * itemp, start;
    itemp = (int*)ML_Get_MyGetrowData(mat_in);
    proc = *itemp;
    for (i = 0; i < N_requested_rows; i++) {
        row = requested_rows[i];
        if (allocated_space < m + 3) return(0);
        values[m] = 2; values[m + 1] = -1; values[m + 2] = -1;
        start = m;
        if (proc == 0) {
            if (row == 0) { cols[m++] = 0; cols[m++] = 2; }
            if (row == 1) { cols[m++] = 1; cols[m++] = 3; }
        }
        if (proc == 1) {
            if (row == 0) { cols[m++] = 0; cols[m++] = 1; cols[m++] = 4; }
            if (row == 1) { cols[m++] = 1; cols[m++] = 0; cols[m++] = 2; }
            if (row == 2) { cols[m++] = 2; cols[m++] = 1; cols[m++] = 3; }
        }
        row_lengths[i] = m - start;
    }
    return(1);
}

int Poisson_matvec(ML_Operator* mat_in, int in_length, double p[], int out_length,
    double ap[])
{
    int i, proc, * itemp;
    double new_p[5];
    itemp = (int*)ML_Get_MyMatvecData(mat_in);
    proc = *itemp;
    for (i = 0; i < in_length; i++) new_p[i] = p[i];
    Poisson_comm(new_p, &proc);
    for (i = 0; i < out_length; i++) ap[i] = 2. * new_p[i];
    if (proc == 0) {
        ap[0] -= new_p[2];
        ap[1] -= new_p[3];
    }
    if (proc == 1) {
        ap[0] -= new_p[1]; ap[0] -= new_p[4];
        ap[1] -= new_p[2]; ap[1] -= new_p[0];
        ap[2] -= new_p[3]; ap[2] -= new_p[1];
    }
    return 0;
}

int Poisson_comm(double x[], void* A_data)
{
    int    proc, neighbor, length, * itemp;
    double send_buffer[2], recv_buffer[2];
    MPI_Request request;
    itemp = (int*)A_data;
    proc = *itemp;
    length = 2;
    if (proc == 0) {
        neighbor = 1;
        send_buffer[0] = x[0]; send_buffer[1] = x[1];
        post_msg((char*)recv_buffer, length, neighbor, &request);
        send_msg((char*)send_buffer, length, neighbor);
        recv_msg((char*)recv_buffer, length, neighbor, &request);
        x[2] = recv_buffer[1]; x[3] = recv_buffer[0];
    }
    else {
        neighbor = 0;
        send_buffer[0] = x[0]; send_buffer[1] = x[2];
        post_msg((char*)recv_buffer, length, neighbor, &request);
        send_msg((char*)send_buffer, length, neighbor);
        recv_msg((char*)recv_buffer, length, neighbor, &request);
        x[3] = recv_buffer[1]; x[4] = recv_buffer[0];
    }
    return 0;
}

int send_msg(char* send_buffer, int length, int neighbor)
{
    ML_Comm_Send(send_buffer, length * sizeof(double), neighbor, 123,
        MPI_COMM_WORLD);
    return 0;
}
int recv_msg(char* recv_buffer, int length, int neighbor, USR_REQ* request)
{
    MPI_Status status;
    MPI_Wait(request, &status);
    return 0;
}
int post_msg(char* recv_buffer, int length, int neighbor, USR_REQ* request)
{
    int type = 123;
    ML_Comm_Irecv(recv_buffer, length * sizeof(double), &neighbor,
        &type, MPI_COMM_WORLD, request);
    return 0;
}
#else
int main(int argc, char* argv[])
{
    printf("In order to use this example, you must configure with the option\n--with-mpi-compilers=full_path_to_your_mpi_compilers and recompile.\n");

    return(EXIT_SUCCESS);
}
#endif  


Your code looks like:

...
#include "ml_include.h"      // <===== What is this? It is NOT standard

#ifdef ML_MPI                    // <===== where is this defined ? It is NOT standard
   #include <mpi.h>
   int main()                          // runs parallel code
   ...

#else
   int main()                          // runs serial code (and confirms it!)

#endif 



You are doing a lot of non-standard things.

What is in ml_include.h? It is not a standard header (in either C++ or MPI).

Where have you defined ML_MPI? If you haven't defined it then you will end up running the serial code (as you have done).
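If you are not sure whether that macro actually reaches the compiler, one quick check (just a sketch of the idea; the messages are mine) is to build a tiny test with exactly the same project settings:

#include <cstdio>

int main()
{
#ifdef ML_MPI
    std::printf("ML_MPI is defined - the parallel branch would be compiled.\n");
#else
    std::printf("ML_MPI is NOT defined - you would get the serial branch.\n");
#endif
    return 0;
}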


You didn't need such a long and complex code to ask this question - why not simplify it to its bare essentials first?


ml_include.h comes from a package (the ML multilevel preconditioner solver) which has already been built. This code can be run in two different ways, either MPI or serial. I defined ML_MPI in the preprocessor settings so the code would know which version of the program to build. I couldn't make a simpler case because I needed to test the ML solver, and this code is an example of ML usage.
The problem is that the defined macro is not working, and in any case the program runs on just one processor. But my question is why? I already defined ML_MPI in the preprocessor settings. How can I make the compiler understand that it should build the MPI version?
Change the line
std::cout << "num_procs" << proc << std::endl;
to
std::cout << "num_procs" << proc << " of " << ml_object->comm->ML_nprocs << " processors " << std::endl;
and state what it reports.


There is also nothing stopping you from calling the normal introspection routines
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
   MPI_Comm_size( MPI_COMM_WORLD, &size );

and printing out the values of rank and size.
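Something like this (a minimal, self-contained sketch; the variable names are mine) would do:

#include <cstdio>
#include <mpi.h>

int main(int argc, char* argv[])
{
    int rank = 0, size = 1;
    MPI_Init(&argc, &argv);
    // Ask MPI directly which rank this is and how many ranks were launched.
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    std::printf("rank %d of %d\n", rank, size);
    MPI_Finalize();
    return 0;
}

If mpiexec -np 2 reports size = 2 here, then the MPI launch itself is fine and the problem is on the ML side.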
num_procs0 of 1 processors
Must be run on two processors
num_procs0 of 1 processors
Must be run on two processors
I think one of the answers has been removed from the thread.
When you create your ml_object with
ML_Create(&ml_object, N_grids);
how is it supposed to know how many processors there are?
ML is already built with MPI. I wrote a shell script for building the package, and it already knows that ML should be built with MPI. I also checked the console output when building the package and all the required compilers were found. So when you call the mpiexec command line, ML already knows it's supposed to run with MPI and knows the number of processors.
The number of processors gets passed from the
mpiexec -np 2 progname.exe
into the program via the
MPI_Init(&argc, &argv);

So the number of processors will be known to the MPI system. But there is nowhere in your code where you pass that information to ml_object.

What does ML_Create do? Is its source code accessible anywhere?
@lastchance Imagine I am building a package which requires other packages to be built first. So I built those prerequisite packages and then used them to build the final package. Then I built a Visual Studio project using the final package, but the compiler kept complaining about linker problems, and I had to link all those prerequisite packages in the VS project again. I would think that when those packages are used to build the final package, the final package should contain all of their features. Am I wrong? I guess the same problem is happening with MPI. This package knows that it was built using MPI, but in my project I still need to include the MPI directory and link to the MPI library. I know the likely answer is that, during the build of the final package, it did not find the prerequisite packages on my machine, but in fact it does find them; otherwise the final package could not be built.
Do you have access to the source code for ML_Create()? It is presumably in the package that you have built.

I would need to see how the information about the MPI runtime environment is used in constructing ml_object.
This is ml_include.h:

#ifndef _MLINCLUDE_
#define _MLINCLUDE_

#include "ml_common.h"
#include "ml_defs.h"
#include "ml_struct.h"
#include "ml_agg_genP.h"
#include "ml_aggregate.h"
#include "ml_amg.h"
#include "ml_amg_genP.h"
#include "ml_bicgstabl.h"
#include "ml_cg.h"
#include "ml_comm.h"
#include "ml_gmres.h"
#include "ml_grid.h"
#include "ml_gridagx.h"
#include "ml_gridfunc.h"
#include "ml_krylov.h"
#include "ml_operator.h"
#include "mli_solver.h"
#include "ml_pde.h"
#include "ml_solver.h"
#include "ml_vec.h"
#include "ml_elementagx.h"
#include "ml_intlist.h"
#include "ml_operatoragx.h"
#include "ml_xyt.h"
#include "ml_op_utils.h"
#include "ml_operator_blockmat.h"
#include "ml_agg_reitzinger.h"
#include "ml_aztec_utils.h"
#include "ml_memory.h"
#include "ml_vampir.h"
#include "ml_amesos.h"
#include "ml_ifpack.h"
#include "ml_agg_METIS.h"
#include "ml_agg_ParMETIS.h"

#endif 


Are you looking to see whether we need to define the number of processors for the ml object in the code? Something like
ML *ml_handle;
int N_levels = 10;
ML_Set_PrintLevel(3);
ML_Create(&ml_handle,N_levels);


or in the macro like

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
  ML_MpiComm Comm(MPI_COMM_WORLD);
#else
  ML_SerialComm Comm;
#endif 

I am looking for the C/C++ source code defining the function
ML_Create( ML *, int )

That is the only possible place where the MPI system can supply the necessary processor information to your ML variables at runtime.

You are using variables
ml_object->comm->ML_mypid
and
ml_object->comm->ML_nprocs
and I need to know where and how they are set. At the moment they seem to have default values of 0 and 1, respectively.
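For what it's worth, a small diagnostic like this (just a sketch; the helper name is mine, but it only reads fields your code already uses), called right after ML_Create(), would make any mismatch obvious:

#include <iostream>
#include <mpi.h>
#include "ml_include.h"

// Print what MPI reports next to what the ML communicator has stored.
void report_comms(ML* ml_object)
{
    int world_rank = 0, world_size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    std::cout << "MPI_COMM_WORLD: rank " << world_rank << " of " << world_size
              << " | ml_object->comm: rank " << ml_object->comm->ML_mypid
              << " of " << ml_object->comm->ML_nprocs << std::endl;
}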
Let me search for that in the package. I'll post it here ASAP.
I searched for ML_Create, and a lot of matches were found.
You have posted code for function
MLI_Solver_Setup()
not
ML_Create()


Look at your original code. The only point where your object can get the necessary information is in ML_Create().
    MPI_Init(&argc, &argv);             // <==== MPI started
    // ...
    ML_Create(&ml_object, N_grids);       // <==== ml_object is set up by ML_Create()

    proc = ml_object->comm->ML_mypid;      // <==== At this point ml_object must have MPI communicator set to get processor rank from it
    std::cout << "num_procs" << proc << std::endl;
    if (ml_object->comm->ML_nprocs != 2) {            // <==== At this point ml_object must have MPI communicator set to get number of processors from it
        if (proc == 0) printf("Must be run on two processors\n");
        // .... 
    }
I am removing the previous post because it is making this thread too long.

int ml_defines_have_printed = 0;
int ML_Create(ML **ml_ptr, int Nlevels)
{
#ifdef ML_MPI
  return ML_Create2(ml_ptr, Nlevels, MPI_COMM_WORLD);
#else
  return ML_Create2(ml_ptr, Nlevels, 0);
#endif
}

int ML_Create2(ML **ml_ptr, int Nlevels, USR_COMM in_comm)
{
   int             i, length;
   double          *max_eigen;
   ML_Operator     *Amat, *Rmat, *Pmat;
   ML_Smoother     *pre_smoother, *post_smoother;
   ML_CSolve       *csolve;
   ML_Grid         *Grid;
   ML_BdryPts      *BCs;
   ML_Mapper       *eqn2grid, *grid2eqn;
   ML_DVector      *Amat_Normalization;
   ML_1Level       *SingleLevel;
   char            str[80];
   ML_Comm         *comm;
   int              *LevelID;
   char            *label;

#ifdef ML_TIMING
   struct ML_Timing *timing;
#endif

   ML_memory_alloc( (void**) ml_ptr, sizeof(ML), "MLM" );

   (*ml_ptr)->id                = ML_ID_ML;
   (*ml_ptr)->ML_finest_level   = -1;
   (*ml_ptr)->ML_coarsest_level = -1;
   (*ml_ptr)->output_level    = 10;
   (*ml_ptr)->res_output_freq = 1;
   (*ml_ptr)->tolerance       = 1.e-8;
   (*ml_ptr)->Cheby_eig_boost = 1.1;
   (*ml_ptr)->max_iterations  = 1000;
   (*ml_ptr)->MinPerProc_repartition = -1;
   (*ml_ptr)->PutOnSingleProc_repartition = -1;
   (*ml_ptr)->LargestMinMaxRatio_repartition = -1.;
   (*ml_ptr)->use_repartitioning = 0;
   (*ml_ptr)->sortColumnsAfterRAP = 0;
   (*ml_ptr)->repartitionStartLevel = -1;
   (*ml_ptr)->RAP_storage_type=ML_MSR_MATRIX;

   ML_Comm_Create2( &((*ml_ptr)->comm), in_comm );
   if (global_comm == NULL)
     global_comm = (*ml_ptr)->comm;

   if ((*ml_ptr)->comm->ML_mypid == 0 && 2 < ML_Get_PrintLevel() &&
       (Nlevels <= 0)) {
     printf("ML_Create: Warning No. of requested levels = %d\n",Nlevels);
   }

comm = (*ml_ptr)->comm;
if (!ml_defines_have_printed && ML_Get_PrintLevel() > 0) {
#ifdef HAVE_ML_PARMETIS
  if (comm->ML_mypid == 0) printf("USing ParMETIS 3.x\n");
#endif
#ifdef ML_NOTALWAYS_LOWEST
  if (comm->ML_mypid == 0) printf("USing ML_NOTALWAYS_LOWEST\n");
#endif
#ifdef ML_SYNCH
  if (comm->ML_mypid == 0) printf("USing ML_SYNCH\n");
#endif
  ml_defines_have_printed = 1;
}

   ML_memory_check("ml_create start");

   ML_memory_alloc((void**) &pre_smoother, sizeof(ML_Smoother)*Nlevels,"MS1");
   ML_memory_alloc((void**) &post_smoother,sizeof(ML_Smoother)*Nlevels,"MS2");
   ML_memory_alloc((void**) &csolve       ,sizeof(ML_CSolve  )*Nlevels,"MCS");
   ML_memory_alloc((void**) &Grid         ,sizeof(ML_Grid    )*Nlevels,"MGD");
   ML_memory_alloc((void**) &BCs         ,sizeof(ML_BdryPts  )*Nlevels,"MBC");
   ML_memory_alloc((void**) &eqn2grid    ,sizeof(ML_Mapper   )*Nlevels,"MM1");
   ML_memory_alloc((void**) &grid2eqn    ,sizeof(ML_Mapper   )*Nlevels,"MM2");
   ML_memory_alloc((void**) &SingleLevel ,sizeof(ML_1Level   )*Nlevels,"MSL");
   ML_memory_alloc((void**) &Amat         ,sizeof(ML_Operator)*Nlevels,"MAM");
   ML_memory_alloc((void**) &Rmat         ,sizeof(ML_Operator)*Nlevels,"MRM");
   ML_memory_alloc((void**) &Pmat         ,sizeof(ML_Operator)*Nlevels,"MPM");
   ML_memory_alloc((void**) &max_eigen    ,sizeof(double)*Nlevels,"MQM");
   ML_memory_alloc((void**) &LevelID     ,sizeof(int)*Nlevels,"MSM");
   label = (char *) ML_allocate(sizeof(char) * 80);
   length = sizeof(ML_DVector) * Nlevels;
   for ( i = 0; i < Nlevels; i++ ) max_eigen[i] = 0.0;
   ML_memory_alloc((void**)&Amat_Normalization, (unsigned int) length, "MAN");

   (*ml_ptr)->ML_num_actual_levels      = -1;
   (*ml_ptr)->ML_num_levels      = Nlevels;
   (*ml_ptr)->pre_smoother       = pre_smoother;
   (*ml_ptr)->post_smoother      = post_smoother;
   (*ml_ptr)->csolve             = csolve;
   (*ml_ptr)->Amat               = Amat;
   (*ml_ptr)->Grid               = Grid;
   (*ml_ptr)->BCs                = BCs;
   (*ml_ptr)->eqn2grid           = eqn2grid;
   (*ml_ptr)->grid2eqn           = grid2eqn;
   (*ml_ptr)->SingleLevel        = SingleLevel;
   (*ml_ptr)->Rmat               = Rmat;
   (*ml_ptr)->Pmat               = Pmat;
   (*ml_ptr)->spectral_radius    = max_eigen;
   (*ml_ptr)->symmetrize_matrix  = ML_FALSE;
   (*ml_ptr)->Amat_Normalization = Amat_Normalization ;
   (*ml_ptr)->timing             = NULL;
   (*ml_ptr)->LevelID            = LevelID;
   (*ml_ptr)->label              = label;

#ifdef ML_TIMING
   ML_memory_alloc((void**) &timing, sizeof(struct ML_Timing),"MT");
   timing->precond_apply_time = 0.;
   timing->total_build_time   = 0.;
   (*ml_ptr)->timing = timing;
#endif

   for (i = 0; i < Nlevels; i++)
   {
      ML_Operator_Init(&(Amat[i]), (*ml_ptr)->comm);
      ML_Operator_Set_1Levels(&(Amat[i]), &SingleLevel[i], &SingleLevel[i]);
      ML_Operator_Set_BdryPts(&(Amat[i]), &BCs[i]);
      ML_Operator_Init(&(Rmat[i]), (*ml_ptr)->comm);
      ML_Operator_Set_1Levels(&(Rmat[i]), &SingleLevel[i], NULL);
      ML_Operator_Set_BdryPts(&(Rmat[i]), &BCs[i]);
      ML_Operator_Init(&(Pmat[i]), (*ml_ptr)->comm);
      ML_Operator_Set_1Levels(&(Pmat[i]), &SingleLevel[i], NULL);
      ML_Operator_Set_BdryPts(&(Pmat[i]), NULL);

      (SingleLevel[i]).comm = (ML_Comm *) (*ml_ptr)->comm;
      SingleLevel[i].Amat          = &Amat[i];
      SingleLevel[i].Rmat          = &Rmat[i];
      SingleLevel[i].Pmat          = &Pmat[i];
      SingleLevel[i].BCs           = &BCs[i];
      SingleLevel[i].eqn2grid      = &eqn2grid[i];
      SingleLevel[i].grid2eqn      = &grid2eqn[i];
      SingleLevel[i].Grid          = &Grid[i];
      SingleLevel[i].pre_smoother  = &pre_smoother[i];
      SingleLevel[i].post_smoother = &post_smoother[i];
      SingleLevel[i].csolve        = &csolve[i];
      SingleLevel[i].Amat_Normalization = &Amat_Normalization[i];
      ML_DVector_Init( &Amat_Normalization[i] );
      SingleLevel[i].levelnum      = i;

      ML_Mapper_Init( &(eqn2grid[i]) );
      ML_Mapper_Init( &(grid2eqn[i]) );
      ML_Grid_Init( &(Grid[i]) );
      ML_BdryPts_Init( &(BCs[i]) );

      LevelID[i] = -1;

      ML_Smoother_Init( &(pre_smoother[i]), &(SingleLevel[i]) );
      ML_Smoother_Init( &(post_smoother[i]), &(SingleLevel[i]) );

      ML_CSolve_Init( &(csolve[i]) );
      ML_CSolve_Set_1Level( &(csolve[i]), &(SingleLevel[i]) );
      sprintf(str,"Amat_%d",i); ML_Operator_Set_Label( &(Amat[i]),str);
      sprintf(str,"Rmat_%d",i); ML_Operator_Set_Label( &(Rmat[i]),str);
      sprintf(str,"Pmat_%d",i); ML_Operator_Set_Label( &(Pmat[i]),str);
      sprintf(str,"PreS_%d",i); ML_Smoother_Set_Label( &(pre_smoother[i]),str);
      sprintf(str,"PostS_%d",i);ML_Smoother_Set_Label( &(post_smoother[i]),str);
      sprintf(str,"Solve_%d",i);ML_CSolve_Set_Label(&(csolve[i]),str);




   }
  return 0;
}
The fact that it has this:

int ML_Create(ML **ml_ptr, int Nlevels)
{
#ifdef ML_MPI
  return ML_Create2(ml_ptr, Nlevels, MPI_COMM_WORLD);
#else
  return ML_Create2(ml_ptr, Nlevels, 0);
#endif
}

means that you have to have ML_MPI defined when you build this whole package, not just when you compile your example.

I have no idea where you have to set ML_MPI.
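One thing you could try (just a sketch, and it assumes the package writes its MPI setting into a configuration header that ml_common.h pulls in; if ML_MPI was only ever passed on the library's own compile line, this will not see it) is to compile a one-liner against the installed headers and let the preprocessor tell you how the library was configured:

#include "ml_common.h"

#ifndef ML_MPI
#error "The installed ML headers do not define ML_MPI - this looks like a serial build."
#endif

int main() { return 0; }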


This code looks painfully double-dereferenced.