"Fossies" - the Fresh Open Source Software Archive

Member "openmpi-4.1.2/ompi/mca/io/romio321/romio/adio/common/ad_darray.c" (24 Nov 2021, 9253 Bytes) of package /linux/misc/openmpi-4.1.2.tar.bz2:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "ad_darray.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 4.1.1_vs_4.1.2.

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *
 *   Copyright (C) 1997 University of Chicago.
 *   See COPYRIGHT notice in top-level directory.
 */

#include "adio.h"
#include "adio_extern.h"

static int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
                            int rank, int darg, int order, MPI_Aint orig_extent,
                            MPI_Datatype type_old, MPI_Datatype *type_new,
                            MPI_Aint *st_offset);
static int MPIOI_Type_cyclic(int *array_of_gsizes, int dim, int ndims, int nprocs,
                             int rank, int darg, int order, MPI_Aint orig_extent,
                             MPI_Datatype type_old, MPI_Datatype *type_new,
                             MPI_Aint *st_offset);

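/* ADIO_Type_create_darray: builds, for the calling process (rank), the MPI
 * datatype describing its piece of an HPF-style distributed array, following
 * the same per-dimension rules as MPI_Type_create_darray (block, cyclic, or
 * no distribution; C or Fortran array order).  The resulting type has lower
 * bound 0 and an extent covering the whole global array, so it can be used
 * directly as a filetype. */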
int ADIO_Type_create_darray(int size, int rank, int ndims,
                            int *array_of_gsizes, int *array_of_distribs,
                            int *array_of_dargs, int *array_of_psizes,
                            int order, MPI_Datatype oldtype,
                            MPI_Datatype *newtype)
{
    MPI_Datatype type_old, type_new = MPI_DATATYPE_NULL, inttype;
    int procs, tmp_rank, i, tmp_size, blklen, *coords;
    MPI_Aint *st_offsets, orig_extent, disp, ub, lb;

    MPI_Type_get_extent(oldtype, &lb, &orig_extent);

    /* calculate position in Cartesian grid as MPI would (row-major
       ordering) */
    coords = (int *) ADIOI_Malloc(ndims * sizeof(int));
    procs = size;
    tmp_rank = rank;
    for (i = 0; i < ndims; i++) {
        procs = procs / array_of_psizes[i];
        coords[i] = tmp_rank / procs;
        tmp_rank = tmp_rank % procs;
    }
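    /* Example: with ndims = 2, array_of_psizes = {2, 3} and rank = 4, this
       yields coords = {1, 1} -- the same coordinates MPI_Cart_coords would
       assign on a 2 x 3 grid. */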

    st_offsets = (MPI_Aint *) ADIOI_Malloc(ndims * sizeof(MPI_Aint));
    type_old = oldtype;

    if (order == MPI_ORDER_FORTRAN) {
        /* dimension 0 changes fastest */
        for (i = 0; i < ndims; i++) {
            switch (array_of_distribs[i]) {
            case MPI_DISTRIBUTE_BLOCK:
                MPIOI_Type_block(array_of_gsizes, i, ndims,
                                 array_of_psizes[i],
                                 coords[i], array_of_dargs[i],
                                 order, orig_extent,
                                 type_old, &type_new,
                                 st_offsets + i);
                break;
            case MPI_DISTRIBUTE_CYCLIC:
                MPIOI_Type_cyclic(array_of_gsizes, i, ndims,
                                  array_of_psizes[i], coords[i],
                                  array_of_dargs[i], order,
                                  orig_extent, type_old,
                                  &type_new, st_offsets + i);
                break;
            case MPI_DISTRIBUTE_NONE:
                /* treat it as a block distribution on 1 process */
                MPIOI_Type_block(array_of_gsizes, i, ndims, 1, 0,
                                 MPI_DISTRIBUTE_DFLT_DARG, order,
                                 orig_extent,
                                 type_old, &type_new,
                                 st_offsets + i);
                break;
            }
            if (i) MPI_Type_free(&type_old);
            type_old = type_new;
        }

        /* add displacement and UB */
        disp = st_offsets[0];
        tmp_size = 1;
        for (i = 1; i < ndims; i++) {
            tmp_size *= array_of_gsizes[i - 1];
            disp += (MPI_Aint) tmp_size * st_offsets[i];
        }
        /* rest done below for both Fortran and C order */
    }

    else /* order == MPI_ORDER_C */ {
        /* dimension ndims-1 changes fastest */
        for (i = ndims - 1; i >= 0; i--) {
            switch (array_of_distribs[i]) {
            case MPI_DISTRIBUTE_BLOCK:
                MPIOI_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                 coords[i], array_of_dargs[i], order,
                                 orig_extent, type_old, &type_new,
                                 st_offsets + i);
                break;
            case MPI_DISTRIBUTE_CYCLIC:
                MPIOI_Type_cyclic(array_of_gsizes, i, ndims,
                                  array_of_psizes[i], coords[i],
                                  array_of_dargs[i], order,
                                  orig_extent, type_old, &type_new,
                                  st_offsets + i);
                break;
            case MPI_DISTRIBUTE_NONE:
                /* treat it as a block distribution on 1 process */
                MPIOI_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                 coords[i], MPI_DISTRIBUTE_DFLT_DARG, order, orig_extent,
                                 type_old, &type_new, st_offsets + i);
                break;
            }
            if (i != ndims - 1) MPI_Type_free(&type_old);
            type_old = type_new;
        }

        /* add displacement and UB */
        disp = st_offsets[ndims - 1];
        tmp_size = 1;
        for (i = ndims - 2; i >= 0; i--) {
            tmp_size *= array_of_gsizes[i + 1];
            disp += (MPI_Aint) tmp_size * st_offsets[i];
        }
    }
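
    /* disp now folds together the per-dimension starting offsets (in units of
       oldtype) that were not already absorbed into the intermediate datatype,
       scaled by the global sizes of the faster-varying dimensions.  It is
       converted to bytes below, and the struct/resized pair then gives the
       final type a lower bound of 0 and an extent equal to the whole global
       array. */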
    disp *= orig_extent;

    ub = orig_extent;
    for (i = 0; i < ndims; i++) ub *= (MPI_Aint) array_of_gsizes[i];

    blklen = 1;

    MPI_Type_create_struct(1, &blklen, &disp, &type_new, &inttype);
    MPI_Type_create_resized(inttype, 0, ub, newtype);
    MPI_Type_free(&inttype);

    MPI_Type_free(&type_new);
    ADIOI_Free(st_offsets);
    ADIOI_Free(coords);
    return MPI_SUCCESS;
}

/* Returns MPI_SUCCESS on success, an MPI error code on failure.  Code above
 * needs to call MPIO_Err_return_xxx.
 */
static int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
                            int rank, int darg, int order, MPI_Aint orig_extent,
                            MPI_Datatype type_old, MPI_Datatype *type_new,
                            MPI_Aint *st_offset)
{
/* nprocs = no. of processes in dimension dim of grid
   rank = coordinate of this process in dimension dim */
    int blksize, global_size, mysize, i, j;
    MPI_Aint stride;

    global_size = array_of_gsizes[dim];

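    /* With the default distribution argument the block size is the ceiling of
       global_size / nprocs; e.g. 10 elements over 4 processes gives blksize 3
       and local sizes 3, 3, 3, 1. */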
    if (darg == MPI_DISTRIBUTE_DFLT_DARG)
        blksize = (global_size + nprocs - 1) / nprocs;
    else {
        blksize = darg;

        /* --BEGIN ERROR HANDLING-- */
        if (blksize <= 0) {
            return MPI_ERR_ARG;
        }

        if (blksize * nprocs < global_size) {
            return MPI_ERR_ARG;
        }
        /* --END ERROR HANDLING-- */
    }

    j = global_size - blksize * rank;
    mysize = ADIOI_MIN(blksize, j);
    if (mysize < 0) mysize = 0;

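    /* Build the type for this dimension.  For every dimension except the
       fastest-varying one, consecutive elements along this dimension are
       separated in the flattened global array by the product of the global
       sizes of all faster-varying dimensions, so the hvector stride is that
       product times orig_extent (in bytes). */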
    stride = orig_extent;
    if (order == MPI_ORDER_FORTRAN) {
        if (dim == 0)
            MPI_Type_contiguous(mysize, type_old, type_new);
        else {
            for (i = 0; i < dim; i++) stride *= (MPI_Aint) array_of_gsizes[i];
            MPI_Type_create_hvector(mysize, 1, stride, type_old, type_new);
        }
    }
    else {
        if (dim == ndims - 1)
            MPI_Type_contiguous(mysize, type_old, type_new);
        else {
            for (i = ndims - 1; i > dim; i--) stride *= (MPI_Aint) array_of_gsizes[i];
            MPI_Type_create_hvector(mysize, 1, stride, type_old, type_new);
        }
    }

    *st_offset = (MPI_Aint) blksize * (MPI_Aint) rank;
    /* in terms of no. of elements of type oldtype in this dimension */
    if (mysize == 0) *st_offset = 0;

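    /* Resize so that the extent of the type built for this dimension equals
       the full span of this dimension together with all faster-varying ones
       (array_of_gsizes[dim] copies of type_old's extent).  Later dimensions
       use this type as their element type and rely on consecutive copies
       being spaced one full extent apart. */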
    MPI_Aint lb_tmp, ex;
    MPI_Type_get_extent(type_old, &lb_tmp, &ex);    /* MPI_Type_extent was removed in MPI-3.0 */
    MPI_Datatype type_tmp;
    MPI_Type_create_resized(*type_new, 0, (MPI_Aint) array_of_gsizes[dim] * ex, &type_tmp);
    MPI_Type_free(type_new);
    *type_new = type_tmp;

    return MPI_SUCCESS;
}

/* Returns MPI_SUCCESS on success, an MPI error code on failure.  Code above
 * needs to call MPIO_Err_return_xxx.
 */
static int MPIOI_Type_cyclic(int *array_of_gsizes, int dim, int ndims, int nprocs,
                             int rank, int darg, int order, MPI_Aint orig_extent,
                             MPI_Datatype type_old, MPI_Datatype *type_new,
                             MPI_Aint *st_offset)
{
/* nprocs = no. of processes in dimension dim of grid
   rank = coordinate of this process in dimension dim */
    int blksize, i, blklens[3], st_index, end_index, local_size, rem, count;
    MPI_Aint stride, disps[3];
    MPI_Datatype type_tmp, type_tmp1, types[3];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG) blksize = 1;
    else blksize = darg;

    /* --BEGIN ERROR HANDLING-- */
    if (blksize <= 0) {
        return MPI_ERR_ARG;
    }
    /* --END ERROR HANDLING-- */

    st_index = rank * blksize;
    end_index = array_of_gsizes[dim] - 1;

    if (end_index < st_index) local_size = 0;
    else {
        local_size = ((end_index - st_index + 1) / (nprocs * blksize)) * blksize;
        rem = (end_index - st_index + 1) % (nprocs * blksize);
        local_size += ADIOI_MIN(rem, blksize);
    }
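    /* local_size = number of elements of this dimension owned by this process
       under a cyclic(blksize) distribution: one block per complete cycle of
       nprocs*blksize elements starting at st_index, plus whatever part of a
       final block remains.  E.g. 10 elements, blksize 2, nprocs 3: the three
       ranks own 4, 4, and 2 elements respectively. */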

    count = local_size / blksize;
    rem = local_size % blksize;

    stride = (MPI_Aint) nprocs * (MPI_Aint) blksize * orig_extent;
    if (order == MPI_ORDER_FORTRAN)
        for (i = 0; i < dim; i++) stride *= (MPI_Aint) array_of_gsizes[i];
    else for (i = ndims - 1; i > dim; i--) stride *= (MPI_Aint) array_of_gsizes[i];

    MPI_Type_create_hvector(count, blksize, stride, type_old, type_new);

    if (rem) {
        /* if the last block is smaller than blksize, include it separately
           using MPI_Type_create_struct */

        types[0] = *type_new;
        types[1] = type_old;
        disps[0] = 0;
        disps[1] = (MPI_Aint) count * stride;
        blklens[0] = 1;
        blklens[1] = rem;

        MPI_Type_create_struct(2, blklens, disps, types, &type_tmp);

        MPI_Type_free(type_new);
        *type_new = type_tmp;
    }

    /* In the first iteration (fastest-varying dimension), the displacement in
       that dimension must be folded into the type itself. */
    if (((order == MPI_ORDER_FORTRAN) && (dim == 0)) ||
        ((order == MPI_ORDER_C) && (dim == ndims - 1))) {
        types[0] = *type_new;
        disps[0] = (MPI_Aint) rank * (MPI_Aint) blksize * orig_extent;
        blklens[0] = 1;
        MPI_Type_create_struct(1, blklens, disps, types, &type_tmp1);
        MPI_Type_create_resized(type_tmp1, 0, orig_extent * (MPI_Aint) array_of_gsizes[dim], &type_tmp);
        MPI_Type_free(&type_tmp1);
        MPI_Type_free(type_new);
        *type_new = type_tmp;

        *st_offset = 0;     /* set it to 0 because it is taken care of in
                               the struct above */
    }
    else {
        *st_offset = (MPI_Aint) rank * (MPI_Aint) blksize;
        /* st_offset is in terms of no. of elements of type oldtype in
         * this dimension */
    }

    if (local_size == 0) *st_offset = 0;

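    /* As in MPIOI_Type_block: resize so that the extent of this dimension's
       type equals its full span (array_of_gsizes[dim] copies of type_old's
       extent), so that slower-varying dimensions stack on top of it
       correctly. */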
    MPI_Aint lb_tmp, ex;
    MPI_Type_get_extent(type_old, &lb_tmp, &ex);    /* MPI_Type_extent was removed in MPI-3.0 */
    MPI_Datatype type_tmp2;
    MPI_Type_create_resized(*type_new, 0, (MPI_Aint) array_of_gsizes[dim] * ex, &type_tmp2);
    MPI_Type_free(type_new);
    *type_new = type_tmp2;

    return MPI_SUCCESS;
}
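
A minimal usage sketch (not part of the file): how a caller might build the filetype for a 100 x 100 global array of MPI_INTs, block-distributed over a hypothetical 2 x 2 process grid in C order. Values and variable names here are illustrative only.

    int gsizes[2]   = {100, 100};
    int distribs[2] = {MPI_DISTRIBUTE_BLOCK, MPI_DISTRIBUTE_BLOCK};
    int dargs[2]    = {MPI_DISTRIBUTE_DFLT_DARG, MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[2]   = {2, 2};
    MPI_Datatype filetype;

    /* size = 4 processes, rank = this process's rank in 0..3 */
    ADIO_Type_create_darray(4, rank, 2, gsizes, distribs, dargs, psizes,
                            MPI_ORDER_C, MPI_INT, &filetype);
    MPI_Type_commit(&filetype);
    /* ... use as the filetype of an MPI file view ... */
    MPI_Type_free(&filetype);

Each process's filetype then describes its own 50 x 50 block of the array while sharing the same overall extent (100*100 ints), which is what lets the four views tile the file without overlap.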