Operation_IterativeLinearSolver.cpp (getdp-3.4.0-source.tgz) | : | Operation_IterativeLinearSolver.cpp (getdp-3.5.0-source.tgz) | ||
---|---|---|---|---|
// GetDP - Copyright (C) 1997-2021 P. Dular and C. Geuzaine, University of Liege | // GetDP - Copyright (C) 1997-2022 P. Dular and C. Geuzaine, University of Liege | |||
// | // | |||
// See the LICENSE.txt file for license information. Please report all | // See the LICENSE.txt file for license information. Please report all | |||
// issues on https://gitlab.onelab.info/getdp/getdp/issues. | // issues on https://gitlab.onelab.info/getdp/getdp/issues. | |||
// | // | |||
// Contributed by Bertrand Thierry | // Contributed by Bertrand Thierry | |||
#include <stdio.h> | #include <stdio.h> | |||
#include <stdlib.h> | #include <stdlib.h> | |||
#include "GetDPConfig.h" | #include "GetDPConfig.h" | |||
#include "ProData.h" | #include "ProData.h" | |||
#include "SolvingOperations.h" | #include "SolvingOperations.h" | |||
#include "Message.h" | #include "Message.h" | |||
#include "OS.h" | #include "OS.h" | |||
extern struct CurrentData Current ; | extern struct CurrentData Current; | |||
// for performance tests | // for performance tests | |||
//#define TIMER | //#define TIMER | |||
#if defined(HAVE_PETSC) && defined(HAVE_GMSH) | #if defined(HAVE_PETSC) && defined(HAVE_GMSH) | |||
#include "petscksp.h" | #include "petscksp.h" | |||
#include <gmsh/GmshGlobal.h> | #include <gmsh/GmshGlobal.h> | |||
#include <gmsh/PView.h> | #include <gmsh/PView.h> | |||
#include <gmsh/PViewData.h> | #include <gmsh/PViewData.h> | |||
#if ((PETSC_VERSION_RELEASE == 0) || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 7))) | #if((PETSC_VERSION_RELEASE == 0) || \ | |||
| ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 7))) | |||
#define PetscViewerSetFormat(A, B) PetscViewerPushFormat(A, B) | #define PetscViewerSetFormat(A, B) PetscViewerPushFormat(A, B) | |||
#endif | #endif | |||
static void _try(int ierr) | static void _try(int ierr) | |||
{ | { | |||
CHKERRCONTINUE(ierr); | CHKERRCONTINUE(ierr); | |||
if(PetscUnlikely(ierr)){ | if(PetscUnlikely(ierr)) { | |||
const char *text; | const char *text; | |||
PetscErrorMessage(ierr, &text, 0); | PetscErrorMessage(ierr, &text, 0); | |||
Message::Error("PETSc error: %s", text); | Message::Error("PETSc error: %s", text); | |||
Message::SetLastPETScError(ierr); | Message::SetLastPETScError(ierr); | |||
} | } | |||
} | } | |||
class ILS{ | class ILS { | |||
// A new communicator can be created. If some processes have no work they must | // A new communicator can be created. If some processes have no work they must | |||
// be excluded from the communicator to avoir dead-lock | // be excluded from the communicator to avoir dead-lock | |||
private: | private: | |||
// current cpu number and total number of cpus | // current cpu number and total number of cpus | |||
static MPI_Comm _comm; | static MPI_Comm _comm; | |||
static int _commRank, _commSize; | static int _commRank, _commSize; | |||
public: | ||||
static int GetCommRank(){ return _commRank; } | public: | |||
static int GetCommSize(){ return _commSize; } | static int GetCommRank() { return _commRank; } | |||
static MPI_Comm GetComm(){ return _comm; } | static int GetCommSize() { return _commSize; } | |||
| static MPI_Comm GetComm() { return _comm; } | |||
}; | }; | |||
MPI_Comm ILS::_comm = MPI_COMM_WORLD; | MPI_Comm ILS::_comm = MPI_COMM_WORLD; | |||
int ILS::_commRank = 0; | int ILS::_commRank = 0; | |||
int ILS::_commSize = 1; | int ILS::_commSize = 1; | |||
class ILSField{ | class ILSField { | |||
public: | public: | |||
// number of Fields in this class | // number of Fields in this class | |||
PetscInt nb_field; | PetscInt nb_field; | |||
// total number of elements of all fields in this class | // total number of elements of all fields in this class | |||
PetscInt n_elem; | PetscInt n_elem; | |||
// GmshTag[j] = tag of field j (in getdp/gmsh, ie : outside IterativeLinearSolver) | // GmshTag[j] = tag of field j (in getdp/gmsh, ie : outside | |||
| // IterativeLinearSolver) | |||
std::vector<PetscInt> GmshTag; | std::vector<PetscInt> GmshTag; | |||
// ILSTag[j] = local tag of field j in the function IterativeLinearSolver | // ILSTag[j] = local tag of field j in the function IterativeLinearSolver | |||
// (usefull for MyField). | // (usefull for MyField). | |||
std::vector<PetscInt> ILSTag; | std::vector<PetscInt> ILSTag; | |||
// rank[j] is the mpi_rank of the process that owns field j | // rank[j] is the mpi_rank of the process that owns field j | |||
std::vector<PetscInt> rank; | std::vector<PetscInt> rank; | |||
// size[j] = nb of elements in the field j | // size[j] = nb of elements in the field j | |||
std::vector<PetscInt> size; | std::vector<PetscInt> size; | |||
// starting index in the Petsc Vec containing all the fields | // starting index in the Petsc Vec containing all the fields | |||
std::vector<PetscInt> iStart; | std::vector<PetscInt> iStart; | |||
skipping to change at line 90 | skipping to change at line 93 | |||
static bool areNeighbor; | static bool areNeighbor; | |||
// number of field that this process must receive | // number of field that this process must receive | |||
int nb_field_to_receive; | int nb_field_to_receive; | |||
std::vector<std::vector<PetscInt> > myN; | std::vector<std::vector<PetscInt> > myN; | |||
// sizes of vectors of PView that this process is in charge | // sizes of vectors of PView that this process is in charge | |||
std::vector<std::vector<PetscInt> > mySizeV; | std::vector<std::vector<PetscInt> > mySizeV; | |||
std::vector<std::vector<PetscInt> > theirN; | std::vector<std::vector<PetscInt> > theirN; | |||
std::vector<std::vector<PetscInt> > theirSizeV; | std::vector<std::vector<PetscInt> > theirSizeV; | |||
// GmshTag of the fields that must be received by the current MPI processe | // GmshTag of the fields that must be received by the current MPI processe | |||
// (concatenation of myNeighbor) | // (concatenation of myNeighbor) | |||
std::vector<PetscInt> FieldToReceive; | std::vector<PetscInt> FieldToReceive; | |||
// RankToSend[j] returns the rank to which the j^th local field must be sent | // RankToSend[j] returns the rank to which the j^th local field must be sent | |||
std::vector<std::vector<PetscInt> > RankToSend; | std::vector<std::vector<PetscInt> > RankToSend; | |||
// CPU Time | // CPU Time | |||
std::vector<double> TimeBcast, TimeIt, TimeTreatment; | std::vector<double> TimeBcast, TimeIt, TimeTreatment; | |||
// The below function is useful to do a reverse search: Given the GmshTag of a | // The below function is useful to do a reverse search: Given the GmshTag of a | |||
// field (GetDP/GMSH) it returns its local tag in IterativeLinearSolver | // field (GetDP/GMSH) it returns its local tag in IterativeLinearSolver | |||
// (ILSTag) Indeed, ILS can renumber the field in another way than gmsh/getdp | // (ILSTag) Indeed, ILS can renumber the field in another way than gmsh/getdp | |||
int GetILSTagFromGmshTag(int gTag) | int GetILSTagFromGmshTag(int gTag) | |||
{ | { | |||
for (int j = 0; j < nb_field ; j++) | for(int j = 0; j < nb_field; j++) | |||
if(GmshTag[j] == gTag) return ILSTag[j]; | if(GmshTag[j] == gTag) return ILSTag[j]; | |||
return -1; //error | return -1; // error | |||
} | } | |||
int GetRankFromGmshTag(int gTag) | int GetRankFromGmshTag(int gTag) | |||
{ | { | |||
for (int j = 0; j < nb_field ; j++) | for(int j = 0; j < nb_field; j++) | |||
if(GmshTag[j] == gTag) return rank[j]; | if(GmshTag[j] == gTag) return rank[j]; | |||
return -1; //error | return -1; // error | |||
} | } | |||
int GetRankFromILSTag(int ilsTag) | int GetRankFromILSTag(int ilsTag) | |||
{ | { | |||
for (int j = 0; j < nb_field ; j++) | for(int j = 0; j < nb_field; j++) | |||
if(ILSTag[j] == ilsTag) return rank[j]; | if(ILSTag[j] == ilsTag) return rank[j]; | |||
return -1; //error | return -1; // error | |||
} | } | |||
int GetGmshTagFromRank(int irank) | int GetGmshTagFromRank(int irank) | |||
{ | { | |||
for (int j = 0; j < nb_field ; j++) | for(int j = 0; j < nb_field; j++) | |||
if(rank[j] == irank) return GmshTag[j]; | if(rank[j] == irank) return GmshTag[j]; | |||
return -1; //error | return -1; // error | |||
} | } | |||
}; | }; | |||
bool ILSField::areNeighbor = false; | bool ILSField::areNeighbor = false; | |||
// pointers to MyField and AllField, valid while Operation_LinearIterativeSolver | // pointers to MyField and AllField, valid while Operation_LinearIterativeSolver | |||
// is running; This is used by Operation_BroadcastFields to explicitely | // is running; This is used by Operation_BroadcastFields to explicitely | |||
// braodcast the fields in the middle of an ILSMatVec call. | // braodcast the fields in the middle of an ILSMatVec call. | |||
static ILSField *MyStaticField = 0, *AllStaticField = 0; | static ILSField *MyStaticField = 0, *AllStaticField = 0; | |||
// Matrix Free structure (Matrix Shell) | // Matrix Free structure (Matrix Shell) | |||
typedef struct{ | typedef struct { | |||
char *LinearSystemType; | char *LinearSystemType; | |||
ILSField *MyField; | ILSField *MyField; | |||
ILSField *AllField; | ILSField *AllField; | |||
struct Resolution *Resolution_P; | struct Resolution *Resolution_P; | |||
struct Operation *Operation_P; | struct Operation *Operation_P; | |||
struct DofData *DofData_P0; | struct DofData *DofData_P0; | |||
struct GeoData *GeoData_P0; | struct GeoData *GeoData_P0; | |||
} ILSMat; | } ILSMat; | |||
static PView *GetViewByTag(int tag) | static PView *GetViewByTag(int tag) | |||
{ | { | |||
PView *view = PView::getViewByTag(tag); | PView *view = PView::getViewByTag(tag); | |||
if(!view) Message::Error("View %d does not exist"); | if(!view) Message::Error("View %d does not exist"); | |||
return view; | return view; | |||
} | } | |||
static PetscErrorCode InitData(ILSField *MyField, ILSField *AllField, | static PetscErrorCode | |||
struct Operation *Operation_P, | InitData(ILSField *MyField, ILSField *AllField, struct Operation *Operation_P, | |||
std::vector<std::vector<std::vector<double> > > *B_std) | std::vector<std::vector<std::vector<double> > > *B_std) | |||
{ | { | |||
int mpi_comm_size = Message::GetCommSize(); | int mpi_comm_size = Message::GetCommSize(); | |||
int mpi_comm_rank = Message::GetCommRank(); | int mpi_comm_rank = Message::GetCommRank(); | |||
std::vector<PetscInt> tab_nb_field_loc; | std::vector<PetscInt> tab_nb_field_loc; | |||
std::vector<PetscInt> displs(mpi_comm_size); | std::vector<PetscInt> displs(mpi_comm_size); | |||
int counter = 0; | int counter = 0; | |||
// number of fields owned by me and the other tasks | // number of fields owned by me and the other tasks | |||
MyField->nb_field = List_Nbr(Operation_P->Case.IterativeLinearSolver.MyFieldTag); | MyField->nb_field = | |||
| List_Nbr(Operation_P->Case.IterativeLinearSolver.MyFieldTag); | |||
tab_nb_field_loc.resize(mpi_comm_size); | tab_nb_field_loc.resize(mpi_comm_size); | |||
MPI_Allgather(&MyField->nb_field, 1, MPI_INT, &tab_nb_field_loc[0], | MPI_Allgather(&MyField->nb_field, 1, MPI_INT, &tab_nb_field_loc[0], 1, | |||
1, MPI_INT, PETSC_COMM_WORLD); | MPI_INT, PETSC_COMM_WORLD); | |||
AllField->nb_field = 0; | AllField->nb_field = 0; | |||
for (int irank = 0 ; irank < mpi_comm_size ; irank ++) | for(int irank = 0; irank < mpi_comm_size; irank++) | |||
AllField->nb_field += tab_nb_field_loc[irank]; | AllField->nb_field += tab_nb_field_loc[irank]; | |||
// displacement vector (for MPI_AllGatherV) | // displacement vector (for MPI_AllGatherV) | |||
displs[0] = 0; | displs[0] = 0; | |||
for (int irank = 1 ; irank < mpi_comm_size ; irank ++) | for(int irank = 1; irank < mpi_comm_size; irank++) | |||
displs[irank] = tab_nb_field_loc[irank-1] + displs[irank-1]; | displs[irank] = tab_nb_field_loc[irank - 1] + displs[irank - 1]; | |||
// Tag of the fields owned by me .... | // Tag of the fields owned by me .... | |||
MyField->GmshTag.resize(MyField->nb_field); | MyField->GmshTag.resize(MyField->nb_field); | |||
MyField->ILSTag.resize(MyField->nb_field); | MyField->ILSTag.resize(MyField->nb_field); | |||
MyField->rank.resize(MyField->nb_field); | MyField->rank.resize(MyField->nb_field); | |||
for(int iField = 0; iField < MyField->nb_field; iField++) { | for(int iField = 0; iField < MyField->nb_field; iField++) { | |||
double d; | double d; | |||
List_Read(Operation_P->Case.IterativeLinearSolver.MyFieldTag, iField, &d); | List_Read(Operation_P->Case.IterativeLinearSolver.MyFieldTag, iField, &d); | |||
MyField->GmshTag[iField] = (int)d; | MyField->GmshTag[iField] = (int)d; | |||
MyField->rank[iField] = mpi_comm_rank; | MyField->rank[iField] = mpi_comm_rank; | |||
MyField->ILSTag[iField] = displs[mpi_comm_rank] + iField; | MyField->ILSTag[iField] = displs[mpi_comm_rank] + iField; | |||
} | } | |||
// ...and by the other tasks | // ...and by the other tasks | |||
AllField->GmshTag.resize(AllField->nb_field); | AllField->GmshTag.resize(AllField->nb_field); | |||
AllField->rank.resize(AllField->nb_field); | AllField->rank.resize(AllField->nb_field); | |||
AllField->ILSTag.resize(AllField->nb_field); | AllField->ILSTag.resize(AllField->nb_field); | |||
for (int iField = 0; iField < AllField->nb_field ; iField ++) | for(int iField = 0; iField < AllField->nb_field; iField++) | |||
AllField->ILSTag[iField] = iField; | AllField->ILSTag[iField] = iField; | |||
MPI_Allgatherv(&MyField->GmshTag[0], MyField->nb_field, MPI_INT, | MPI_Allgatherv(&MyField->GmshTag[0], MyField->nb_field, MPI_INT, | |||
&AllField->GmshTag[0], &tab_nb_field_loc[0], &displs[0], MPI_INT, | &AllField->GmshTag[0], &tab_nb_field_loc[0], &displs[0], | |||
PETSC_COMM_WORLD); | MPI_INT, PETSC_COMM_WORLD); | |||
MPI_Allgatherv(&MyField->rank[0], MyField->nb_field, MPI_INT, | MPI_Allgatherv(&MyField->rank[0], MyField->nb_field, MPI_INT, | |||
&AllField->rank[0], &tab_nb_field_loc[0], &displs[0], MPI_INT, | &AllField->rank[0], &tab_nb_field_loc[0], &displs[0], MPI_INT, | |||
PETSC_COMM_WORLD); | PETSC_COMM_WORLD); | |||
// Now the (local) fields in RAM must be read | // Now the (local) fields in RAM must be read | |||
(*B_std).resize(MyField->nb_field); | (*B_std).resize(MyField->nb_field); | |||
MyField->n_elem = 0; | MyField->n_elem = 0; | |||
MyField->size.resize(MyField->nb_field); | MyField->size.resize(MyField->nb_field); | |||
for(int iField = 0; iField < MyField->nb_field; iField ++) { | for(int iField = 0; iField < MyField->nb_field; iField++) { | |||
(*B_std)[iField].resize(2); | (*B_std)[iField].resize(2); | |||
int d; | int d; | |||
PView *view = GetViewByTag(MyField->GmshTag[iField]); | PView *view = GetViewByTag(MyField->GmshTag[iField]); | |||
view->getData()->toVector((*B_std)[iField]); | view->getData()->toVector((*B_std)[iField]); | |||
d = (*B_std)[iField][0].size(); | d = (*B_std)[iField][0].size(); | |||
MyField->size[iField] = d; | MyField->size[iField] = d; | |||
MyField->n_elem += d; | MyField->n_elem += d; | |||
} | } | |||
// Share information on the size of the local fields with other tasks | // Share information on the size of the local fields with other tasks | |||
MPI_Allreduce(&MyField->n_elem, &AllField->n_elem, 1, MPI_INT, | MPI_Allreduce(&MyField->n_elem, &AllField->n_elem, 1, MPI_INT, MPI_SUM, | |||
MPI_SUM, PETSC_COMM_WORLD); | PETSC_COMM_WORLD); | |||
AllField->size.resize(AllField->nb_field); | AllField->size.resize(AllField->nb_field); | |||
MPI_Allgatherv(&MyField->size[0], MyField->nb_field, MPI_INT, | MPI_Allgatherv(&MyField->size[0], MyField->nb_field, MPI_INT, | |||
&AllField->size[0], &tab_nb_field_loc[0], &displs[0], MPI_INT, | &AllField->size[0], &tab_nb_field_loc[0], &displs[0], MPI_INT, | |||
PETSC_COMM_WORLD); | PETSC_COMM_WORLD); | |||
// Compute the starting/ending index in the futur Petsc Vec containing all the Gmsh fields | // Compute the starting/ending index in the futur Petsc Vec containing all the | |||
| // Gmsh fields | |||
AllField->iStart.resize(AllField->nb_field); | AllField->iStart.resize(AllField->nb_field); | |||
AllField->iEnd.resize(AllField->nb_field); | AllField->iEnd.resize(AllField->nb_field); | |||
MyField->iStart.resize(MyField->nb_field); | MyField->iStart.resize(MyField->nb_field); | |||
MyField->iEnd.resize(MyField->nb_field); | MyField->iEnd.resize(MyField->nb_field); | |||
AllField->iStart[0] = 0; | AllField->iStart[0] = 0; | |||
counter = 0; | counter = 0; | |||
for(int j = 0; j < AllField->nb_field; j++){ | for(int j = 0; j < AllField->nb_field; j++) { | |||
if(j > 0) | if(j > 0) AllField->iStart[j] = AllField->iEnd[j - 1] + 1; | |||
AllField->iStart[j] = AllField->iEnd[j-1] + 1; | ||||
AllField->iEnd[j] = AllField->iStart[j] + AllField->size[j] - 1; | AllField->iEnd[j] = AllField->iStart[j] + AllField->size[j] - 1; | |||
// Store in MyField if I am in charge of the Field | // Store in MyField if I am in charge of the Field | |||
if(AllField->rank[j] == mpi_comm_rank){ | if(AllField->rank[j] == mpi_comm_rank) { | |||
MyField->iStart[counter] = AllField->iStart[j]; | MyField->iStart[counter] = AllField->iStart[j]; | |||
MyField->iEnd[counter] = AllField->iEnd[j]; | MyField->iEnd[counter] = AllField->iEnd[j]; | |||
counter++; | counter++; | |||
} | } | |||
} | } | |||
// Who are my Neighbors for the Broadcast ? At the time of writing, GetDP does | // Who are my Neighbors for the Broadcast ? At the time of writing, GetDP does | |||
// not manage 2D Lists. Thus, to act as-if, the list of neighbors is composed | // not manage 2D Lists. Thus, to act as-if, the list of neighbors is composed | |||
// as follows: | // as follows: | |||
// NeighborFieldTag = {n_0, ... n_0 GmshTag ... , n_1, ... n_1 GmshTag, ... } | // NeighborFieldTag = {n_0, ... n_0 GmshTag ... , n_1, ... n_1 GmshTag, | |||
| // ...} | |||
// For example, if | // For example, if | |||
// MyFieldTag = {0, 3} | // MyFieldTag = {0, 3} | |||
// NeighborFieldTag = {2, 5, 1, 3, 2, 4, 6} | // NeighborFieldTag = {2, 5, 1, 3, 2, 4, 6} | |||
// This mean that current process is in charge of Field with GmshTag 0 and 7. | // This mean that current process is in charge of Field with GmshTag 0 and 7. | |||
// Field of GmshTag 0 has 2 neighbors : fields of GmshTag 5 and 1 | // Field of GmshTag 0 has 2 neighbors : fields of GmshTag 5 and 1 | |||
// Field of GmshTag 7 has 3 neighbors : fields of GmshTag 2, 4 and 6 | // Field of GmshTag 7 has 3 neighbors : fields of GmshTag 2, 4 and 6 | |||
// (if GetDP changes and accepts lists of lists, then this trick should be useless | // (if GetDP changes and accepts lists of lists, then this trick should be | |||
// and changed !) | // useless and changed !) | |||
int nNeighbor_aux = 0; | int nNeighbor_aux = 0; | |||
nNeighbor_aux = List_Nbr(Operation_P->Case.IterativeLinearSolver.NeighborFieldTag); | nNeighbor_aux = | |||
| List_Nbr(Operation_P->Case.IterativeLinearSolver.NeighborFieldTag); | |||
// make every process agreed on whether there is neighbor or not | // make every process agreed on whether there is neighbor or not | |||
if(mpi_comm_size < 2){ | if(mpi_comm_size < 2) { ILSField::areNeighbor = false; } | |||
ILSField::areNeighbor = false; | else { | |||
} | // suppose it's true | |||
else{ | ||||
//suppose it's true | ||||
ILSField::areNeighbor = true; | ILSField::areNeighbor = true; | |||
//share info on neighbor | // share info on neighbor | |||
int bool_neigh = (nNeighbor_aux > 0); | int bool_neigh = (nNeighbor_aux > 0); | |||
std::vector<int> tab_bool_neigh(mpi_comm_size); | std::vector<int> tab_bool_neigh(mpi_comm_size); | |||
MPI_Allgather(&bool_neigh, 1, MPI_INT, &tab_bool_neigh[0], 1, | MPI_Allgather(&bool_neigh, 1, MPI_INT, &tab_bool_neigh[0], 1, MPI_INT, | |||
MPI_INT, MPI_COMM_WORLD); | MPI_COMM_WORLD); | |||
for(int irank = 0; irank < mpi_comm_size ; irank ++) | for(int irank = 0; irank < mpi_comm_size; irank++) | |||
if(tab_bool_neigh[irank] == 0 && AllField->GetGmshTagFromRank(irank) >= 0) | if(tab_bool_neigh[irank] == 0 && AllField->GetGmshTagFromRank(irank) >= 0) | |||
// if one process has no neighbord AND is charge of some fields (=is a worker) | // if one process has no neighbord AND is charge of some fields (=is a | |||
| // worker) | |||
ILSField::areNeighbor = false; | ILSField::areNeighbor = false; | |||
} | } | |||
if(ILSField::areNeighbor){ | if(ILSField::areNeighbor) { | |||
int cpt_neigh = 0; // counter in list IterativeLinearSolver.NeighborFieldTag | int cpt_neigh = 0; // counter in list IterativeLinearSolver.NeighborFieldTag | |||
// for every field, RankToSend contain the rank of the process in need of | // for every field, RankToSend contain the rank of the process in need of | |||
// the field | // the field | |||
MyField->RankToSend.resize(MyField->nb_field); | MyField->RankToSend.resize(MyField->nb_field); | |||
int cpt_send = 0; | int cpt_send = 0; | |||
// over-sizing FieldToReceive, which contains the field that are needed by | // over-sizing FieldToReceive, which contains the field that are needed by | |||
// this mpi process | // this mpi process | |||
MyField->FieldToReceive.resize(AllField->nb_field - MyField->nb_field); | MyField->FieldToReceive.resize(AllField->nb_field - MyField->nb_field); | |||
int cpt_recv = 0; | int cpt_recv = 0; | |||
// read through every neighbors | // read through every neighbors | |||
for(int ifield = 0 ; ifield < MyField->nb_field ; ifield++){ | for(int ifield = 0; ifield < MyField->nb_field; ifield++) { | |||
double d; | double d; | |||
List_Read(Operation_P->Case.IterativeLinearSolver.NeighborFieldTag, | List_Read(Operation_P->Case.IterativeLinearSolver.NeighborFieldTag, | |||
cpt_neigh, &d); | cpt_neigh, &d); | |||
int n_neigh = (int)d; | int n_neigh = (int)d; | |||
cpt_send = 0; | cpt_send = 0; | |||
//at maximum n_neigh process to send this view | // at maximum n_neigh process to send this view | |||
MyField->RankToSend[ifield].resize(n_neigh); | MyField->RankToSend[ifield].resize(n_neigh); | |||
for(int j = 0; j < n_neigh ; j++){ | for(int j = 0; j < n_neigh; j++) { | |||
//counter in list NeighborFieldTag | // counter in list NeighborFieldTag | |||
cpt_neigh ++; | cpt_neigh++; | |||
List_Read(Operation_P->Case.IterativeLinearSolver.NeighborFieldTag, | List_Read(Operation_P->Case.IterativeLinearSolver.NeighborFieldTag, | |||
cpt_neigh, &d); | cpt_neigh, &d); | |||
int GmshTag_newneigh = (int)d; | int GmshTag_newneigh = (int)d; | |||
// Check if not already stored (either because this process is in charge | // Check if not already stored (either because this process is in charge | |||
// of the field or due to a doublon) | // of the field or due to a doublon) | |||
bool isStored = false; | bool isStored = false; | |||
for(int i = 0; i < MyField->nb_field ; i++){ | for(int i = 0; i < MyField->nb_field; i++) { | |||
if(GmshTag_newneigh == MyField->GmshTag[i]){ | if(GmshTag_newneigh == MyField->GmshTag[i]) { | |||
isStored = true; | isStored = true; | |||
break; | break; | |||
} | } | |||
} | } | |||
for(int i = 0; i < cpt_recv ; i++){ | for(int i = 0; i < cpt_recv; i++) { | |||
if(GmshTag_newneigh == MyField->FieldToReceive[i]){ | if(GmshTag_newneigh == MyField->FieldToReceive[i]) { | |||
isStored = true; | isStored = true; | |||
break; | break; | |||
} | } | |||
} | } | |||
// in case it's not already store | // in case it's not already store | |||
if(!isStored){ | if(!isStored) { | |||
MyField->FieldToReceive[cpt_recv] = GmshTag_newneigh; | MyField->FieldToReceive[cpt_recv] = GmshTag_newneigh; | |||
cpt_recv++; | cpt_recv++; | |||
} | } | |||
// check if stored in the table of Mpi processes which will receive this field | // check if stored in the table of Mpi processes which will receive this | |||
| // field | |||
isStored = false; | isStored = false; | |||
int rank_new_neigh = | int rank_new_neigh = | |||
AllField->rank[AllField->GetILSTagFromGmshTag(GmshTag_newneigh)]; | AllField->rank[AllField->GetILSTagFromGmshTag(GmshTag_newneigh)]; | |||
MyField->RankToSend[ifield].resize(n_neigh); | MyField->RankToSend[ifield].resize(n_neigh); | |||
// Maybe this process is in charge of this field.. | // Maybe this process is in charge of this field.. | |||
if(rank_new_neigh == mpi_comm_rank) | if(rank_new_neigh == mpi_comm_rank) | |||
isStored = true; | isStored = true; | |||
else{ //...or maybe it is already stored ... | else { //...or maybe it is already stored ... | |||
for(int i = 0; i < cpt_send ; i++){ | for(int i = 0; i < cpt_send; i++) { | |||
if(rank_new_neigh == MyField->RankToSend[ifield][i]){ | if(rank_new_neigh == MyField->RankToSend[ifield][i]) { | |||
isStored = true; | isStored = true; | |||
break; } | break; | |||
| } | |||
} | } | |||
} | } | |||
if(!isStored){ // not already stored | if(!isStored) { // not already stored | |||
MyField->RankToSend[ifield][cpt_send] = rank_new_neigh; | MyField->RankToSend[ifield][cpt_send] = rank_new_neigh; | |||
cpt_send++; | cpt_send++; | |||
} | } | |||
} | } | |||
// resize | // resize | |||
MyField->RankToSend[ifield].resize(cpt_send); | MyField->RankToSend[ifield].resize(cpt_send); | |||
cpt_neigh++; | cpt_neigh++; | |||
} | } | |||
// resize | // resize | |||
MyField->FieldToReceive.resize(cpt_recv); | MyField->FieldToReceive.resize(cpt_recv); | |||
MyField->nb_field_to_receive = cpt_recv; | MyField->nb_field_to_receive = cpt_recv; | |||
// Check and exchange information on the size of the PView | // Check and exchange information on the size of the PView | |||
// Exchange information on the size of the PView (Field) with the neighbors | // Exchange information on the size of the PView (Field) with the neighbors | |||
MyField->myN.resize(MyField->nb_field); | MyField->myN.resize(MyField->nb_field); | |||
MyField->mySizeV.resize(MyField->nb_field); | MyField->mySizeV.resize(MyField->nb_field); | |||
std::vector< MPI_Request > tab_request(0); | std::vector<MPI_Request> tab_request(0); | |||
for (int mfield = 0 ; mfield < MyField->nb_field ; mfield ++){ | for(int mfield = 0; mfield < MyField->nb_field; mfield++) { | |||
// Measure the size of the vectors of Field of local number mfield | // Measure the size of the vectors of Field of local number mfield | |||
std::vector< std::vector<double>* > V(24); | std::vector<std::vector<double> *> V(24); | |||
MyField->myN[mfield].resize(24); | MyField->myN[mfield].resize(24); | |||
MyField->mySizeV[mfield].resize(24); | MyField->mySizeV[mfield].resize(24); | |||
int GmshTag = MyField->GmshTag[mfield]; | int GmshTag = MyField->GmshTag[mfield]; | |||
PView *view = GetViewByTag(GmshTag); | PView *view = GetViewByTag(GmshTag); | |||
view->getData()->getListPointers(&(MyField->myN[mfield][0]), &V[0]); | view->getData()->getListPointers(&(MyField->myN[mfield][0]), &V[0]); | |||
for(int j = 0 ; j < 24 ; j++) | for(int j = 0; j < 24; j++) | |||
MyField->mySizeV[mfield][j] = (*(V[j])).size(); | MyField->mySizeV[mfield][j] = (*(V[j])).size(); | |||
// Exchange information about the sizes (mySizeV and myN) | // Exchange information about the sizes (mySizeV and myN) | |||
int n_proc_to_send = MyField->RankToSend[mfield].size(); | int n_proc_to_send = MyField->RankToSend[mfield].size(); | |||
for(int j = 0 ; j < n_proc_to_send ; j++){ | for(int j = 0; j < n_proc_to_send; j++) { | |||
MPI_Request sendN, sendSizeV; | MPI_Request sendN, sendSizeV; | |||
int tagN = 10*GmshTag + 1; | int tagN = 10 * GmshTag + 1; | |||
int tagSizeV = 10*GmshTag + 2; | int tagSizeV = 10 * GmshTag + 2; | |||
// send vector myN and mysizeV | // send vector myN and mysizeV | |||
MPI_Isend(&(MyField->myN[mfield][0]), 24, MPI_INT, | MPI_Isend(&(MyField->myN[mfield][0]), 24, MPI_INT, | |||
MyField->RankToSend[mfield][j], tagN, MPI_COMM_WORLD, &sendN); | MyField->RankToSend[mfield][j], tagN, MPI_COMM_WORLD, &sendN); | |||
MPI_Isend(&(MyField->mySizeV[mfield][0]), 24, MPI_INT, | MPI_Isend(&(MyField->mySizeV[mfield][0]), 24, MPI_INT, | |||
MyField->RankToSend[mfield][j], tagSizeV, MPI_COMM_WORLD, &sendSizeV); | MyField->RankToSend[mfield][j], tagSizeV, MPI_COMM_WORLD, | |||
| &sendSizeV); | |||
tab_request.push_back(sendN); | tab_request.push_back(sendN); | |||
tab_request.push_back(sendSizeV); | tab_request.push_back(sendSizeV); | |||
} | } | |||
} | } | |||
// Receive information from the other process | // Receive information from the other process | |||
MyField->theirN.resize(MyField->nb_field_to_receive); | MyField->theirN.resize(MyField->nb_field_to_receive); | |||
MyField->theirSizeV.resize(MyField->nb_field_to_receive); | MyField->theirSizeV.resize(MyField->nb_field_to_receive); | |||
for (int ifield = 0 ; ifield < MyField->nb_field_to_receive ; ifield ++){ | for(int ifield = 0; ifield < MyField->nb_field_to_receive; ifield++) { | |||
MPI_Request recvN, recvSizeV; | MPI_Request recvN, recvSizeV; | |||
// receive information on vectors N and sizeV from the other | // receive information on vectors N and sizeV from the other | |||
int fieldGmshTag = MyField->FieldToReceive[ifield]; | int fieldGmshTag = MyField->FieldToReceive[ifield]; | |||
int fieldILSTag = AllField->GetILSTagFromGmshTag(fieldGmshTag); | int fieldILSTag = AllField->GetILSTagFromGmshTag(fieldGmshTag); | |||
int rank_emiter = AllField->rank[fieldILSTag]; | int rank_emiter = AllField->rank[fieldILSTag]; | |||
int tagN = 10*fieldGmshTag + 1; | int tagN = 10 * fieldGmshTag + 1; | |||
int tagSizeV = 10*fieldGmshTag + 2; | int tagSizeV = 10 * fieldGmshTag + 2; | |||
// resize before receiving | // resize before receiving | |||
MyField->theirN[ifield].resize(24); | MyField->theirN[ifield].resize(24); | |||
MyField->theirSizeV[ifield].resize(24); | MyField->theirSizeV[ifield].resize(24); | |||
// Receive | // Receive | |||
MPI_Irecv(&(MyField->theirN[ifield][0]), 24, MPI_INT, rank_emiter, tagN, | MPI_Irecv(&(MyField->theirN[ifield][0]), 24, MPI_INT, rank_emiter, tagN, | |||
MPI_COMM_WORLD, &recvN); | MPI_COMM_WORLD, &recvN); | |||
MPI_Irecv(&(MyField->theirSizeV[ifield][0]), 24, MPI_INT, rank_emiter, tagSizeV, | MPI_Irecv(&(MyField->theirSizeV[ifield][0]), 24, MPI_INT, rank_emiter, | |||
MPI_COMM_WORLD, &recvSizeV); | tagSizeV, MPI_COMM_WORLD, &recvSizeV); | |||
tab_request.push_back(recvN); | tab_request.push_back(recvN); | |||
tab_request.push_back(recvSizeV); | tab_request.push_back(recvSizeV); | |||
} | } | |||
// check if reception is ok | // check if reception is ok | |||
std::vector< MPI_Status > tab_status; | std::vector<MPI_Status> tab_status; | |||
MPI_Waitall(tab_request.size(), &tab_request[0], &tab_status[0]); | MPI_Waitall(tab_request.size(), &tab_request[0], &tab_status[0]); | |||
} | } | |||
// keep track of fields for external use | // keep track of fields for external use | |||
MyStaticField = MyField; | MyStaticField = MyField; | |||
AllStaticField = AllField; | AllStaticField = AllField; | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
// Communicate PViews | // Communicate PViews | |||
static PetscErrorCode PViewBCast(ILSField MyField, ILSField AllField, | static PetscErrorCode | |||
const std::set<int> &fieldsToSkip=std::set<int>()) | PViewBCast(ILSField MyField, ILSField AllField, | |||
| const std::set<int> &fieldsToSkip = std::set<int>()) | |||
{ | { | |||
if(Message::GetCommSize() == 1) // serial: all views are available to everyone | if(Message::GetCommSize() == 1) // serial: all views are available to everyone | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
if(!(ILSField::areNeighbor)){ | if(!(ILSField::areNeighbor)) { | |||
// broadcast all views | // broadcast all views | |||
for (int iField = 0 ; iField < AllField.nb_field ; iField++){ | for(int iField = 0; iField < AllField.nb_field; iField++) { | |||
int GmshTag = AllField.GmshTag[iField]; | int GmshTag = AllField.GmshTag[iField]; | |||
int fieldRank = AllField.rank[iField]; | int fieldRank = AllField.rank[iField]; | |||
std::vector< std::vector<double>* > V(24); | std::vector<std::vector<double> *> V(24); | |||
std::vector<int> sizeV(24); | std::vector<int> sizeV(24); | |||
std::vector<int> N(24); | std::vector<int> N(24); | |||
int masterRank = fieldRank; | int masterRank = fieldRank; | |||
MPI_Comm fieldcomm = MPI_COMM_WORLD; | MPI_Comm fieldcomm = MPI_COMM_WORLD; | |||
int mpi_fieldcomm_rank = Message::GetCommRank(); | int mpi_fieldcomm_rank = Message::GetCommRank(); | |||
if(mpi_fieldcomm_rank == fieldRank){ | if(mpi_fieldcomm_rank == fieldRank) { | |||
PView *view = GetViewByTag(GmshTag); | PView *view = GetViewByTag(GmshTag); | |||
view->getData()->getListPointers(&N[0], &V[0]); | view->getData()->getListPointers(&N[0], &V[0]); | |||
for(int j = 0 ; j < 24 ; j++) | for(int j = 0; j < 24; j++) sizeV[j] = (*(V[j])).size(); | |||
sizeV[j] = (*(V[j])).size(); | ||||
} | } | |||
// Transfer PView | // Transfer PView | |||
MPI_Bcast(&N[0], 24, MPI_INT, masterRank, fieldcomm); | MPI_Bcast(&N[0], 24, MPI_INT, masterRank, fieldcomm); | |||
MPI_Bcast(&sizeV[0], 24, MPI_INT, masterRank, fieldcomm); | MPI_Bcast(&sizeV[0], 24, MPI_INT, masterRank, fieldcomm); | |||
for(int j = 0; j < 24 ; j ++){ | for(int j = 0; j < 24; j++) { | |||
if(mpi_fieldcomm_rank != masterRank){ | if(mpi_fieldcomm_rank != masterRank) { | |||
V[j] = new std::vector<double>; | V[j] = new std::vector<double>; | |||
(*(V[j])).resize(sizeV[j]); | (*(V[j])).resize(sizeV[j]); | |||
} | } | |||
if(sizeV[j] > 0) //avoid useless BCast | if(sizeV[j] > 0) // avoid useless BCast | |||
MPI_Bcast(&(*(V[j]))[0], sizeV[j], MPI_DOUBLE, masterRank, fieldcomm); | MPI_Bcast(&(*(V[j]))[0], sizeV[j], MPI_DOUBLE, masterRank, fieldcomm); | |||
} | } | |||
// All other tasks of the communicator create/update the views | // All other tasks of the communicator create/update the views | |||
if(mpi_fieldcomm_rank != masterRank){ | if(mpi_fieldcomm_rank != masterRank) { | |||
PView *view = new PView(GmshTag); | PView *view = new PView(GmshTag); | |||
view->getData()->importLists(&N[0], &V[0]); | view->getData()->importLists(&N[0], &V[0]); | |||
for(int j = 0 ; j < 24 ; j++) | for(int j = 0; j < 24; j++) delete V[j]; | |||
delete V[j] ; | ||||
} | } | |||
} | } | |||
} | } | |||
else{ | else { | |||
// With a specification on the neighbors, asynchronous Send/Recv (only with | // With a specification on the neighbors, asynchronous Send/Recv (only with | |||
// the neighbors) | // the neighbors) | |||
std::vector< MPI_Request > tab_request(0); | std::vector<MPI_Request> tab_request(0); | |||
// send my PView to my neighbors | // send my PView to my neighbors | |||
for (int ifield = 0 ; ifield < MyField.nb_field ; ifield ++){ | for(int ifield = 0; ifield < MyField.nb_field; ifield++) { | |||
int GmshTag = MyField.GmshTag[ifield]; | int GmshTag = MyField.GmshTag[ifield]; | |||
// don't send field if explicitely asked to skip it | // don't send field if explicitely asked to skip it | |||
if(fieldsToSkip.find(GmshTag) != fieldsToSkip.end()) continue; | if(fieldsToSkip.find(GmshTag) != fieldsToSkip.end()) continue; | |||
PView *view = GetViewByTag(GmshTag); | PView *view = GetViewByTag(GmshTag); | |||
std::vector< std::vector<double>* > V_send(24); | std::vector<std::vector<double> *> V_send(24); | |||
std::vector<int> N(24); | std::vector<int> N(24); | |||
view->getData()->getListPointers(&N[0], &V_send[0]); | view->getData()->getListPointers(&N[0], &V_send[0]); | |||
for (int j = 0 ; j < 24 ; j ++){ | for(int j = 0; j < 24; j++) { | |||
int tag = 100 * GmshTag + j; | int tag = 100 * GmshTag + j; | |||
int n_data = MyField.mySizeV[ifield][j]; | int n_data = MyField.mySizeV[ifield][j]; | |||
if(n_data > 0){ | if(n_data > 0) { | |||
//Loop on the receiver | // Loop on the receiver | |||
for (unsigned int ineigh = 0 ; ineigh < MyField.RankToSend[ifield].size() ; | for(unsigned int ineigh = 0; | |||
ineigh ++){ | ineigh < MyField.RankToSend[ifield].size(); ineigh++) { | |||
MPI_Request sendV; | MPI_Request sendV; | |||
int receiver = MyField.RankToSend[ifield][ineigh]; | int receiver = MyField.RankToSend[ifield][ineigh]; | |||
MPI_Isend(&(*(V_send[j]))[0], n_data, MPI_DOUBLE, receiver, tag, | MPI_Isend(&(*(V_send[j]))[0], n_data, MPI_DOUBLE, receiver, tag, | |||
MPI_COMM_WORLD, &sendV); | MPI_COMM_WORLD, &sendV); | |||
tab_request.push_back(sendV); | tab_request.push_back(sendV); | |||
Message::Debug("Rank %d has sent %d", Message::GetCommRank(), GmshTa | Message::Debug("Rank %d has sent %d", Message::GetCommRank(), | |||
g); | GmshTag); | |||
} | } | |||
} | } | |||
} | } | |||
} | } | |||
//receive all the PView I need | // receive all the PView I need | |||
std::vector< std::vector< std::vector<double>* > > V_recv(MyField.nb_field_to_receive); | std::vector<std::vector<std::vector<double> *> > V_recv( | |||
| MyField.nb_field_to_receive); | |||
for (int ifield = 0 ; ifield < MyField.nb_field_to_receive ; ifield ++){ | for(int ifield = 0; ifield < MyField.nb_field_to_receive; ifield++) { | |||
int GmshTag = MyField.FieldToReceive[ifield]; | int GmshTag = MyField.FieldToReceive[ifield]; | |||
// don't receive field if explicitely asked to skip it | // don't receive field if explicitely asked to skip it | |||
if(fieldsToSkip.find(GmshTag) != fieldsToSkip.end()) continue; | if(fieldsToSkip.find(GmshTag) != fieldsToSkip.end()) continue; | |||
int sender = AllField.GetRankFromGmshTag(GmshTag); | int sender = AllField.GetRankFromGmshTag(GmshTag); | |||
V_recv[ifield].resize(24); | V_recv[ifield].resize(24); | |||
std::vector<int> N(24); | std::vector<int> N(24); | |||
// allocate memory | // allocate memory | |||
for (int j = 0 ; j < 24 ; j ++){ | for(int j = 0; j < 24; j++) { | |||
V_recv[ifield][j] = new std::vector<double>; | V_recv[ifield][j] = new std::vector<double>; | |||
(*(V_recv[ifield][j])).resize(MyField.theirSizeV[ifield][j]); | (*(V_recv[ifield][j])).resize(MyField.theirSizeV[ifield][j]); | |||
} | } | |||
for (int j = 0 ; j < 24 ; j ++){ | for(int j = 0; j < 24; j++) { | |||
int n_data = MyField.theirSizeV[ifield][j]; | int n_data = MyField.theirSizeV[ifield][j]; | |||
if(n_data > 0){ | if(n_data > 0) { | |||
MPI_Request recvV; | MPI_Request recvV; | |||
int tag = 100*GmshTag + j; | int tag = 100 * GmshTag + j; | |||
MPI_Irecv(&(*(V_recv[ifield][j]))[0], n_data, MPI_DOUBLE, sender, tag, | MPI_Irecv(&(*(V_recv[ifield][j]))[0], n_data, MPI_DOUBLE, sender, tag, | |||
MPI_COMM_WORLD, &recvV); | MPI_COMM_WORLD, &recvV); | |||
tab_request.push_back(recvV); | tab_request.push_back(recvV); | |||
Message::Debug("Rank %d has received %d", Message::GetCommRank(), Gmsh | Message::Debug("Rank %d has received %d", Message::GetCommRank(), | |||
Tag); | GmshTag); | |||
} | } | |||
} | } | |||
} | } | |||
// check if reception is ok | // check if reception is ok | |||
std::vector< MPI_Status > tab_status(tab_request.size()); | std::vector<MPI_Status> tab_status(tab_request.size()); | |||
MPI_Waitall(tab_request.size(), &tab_request[0], &tab_status[0]); | MPI_Waitall(tab_request.size(), &tab_request[0], &tab_status[0]); | |||
for (int ifield = 0 ; ifield < MyField.nb_field_to_receive ; ifield ++){ | for(int ifield = 0; ifield < MyField.nb_field_to_receive; ifield++) { | |||
int GmshTag = MyField.FieldToReceive[ifield]; | int GmshTag = MyField.FieldToReceive[ifield]; | |||
if(fieldsToSkip.find(GmshTag) != fieldsToSkip.end()) continue; | if(fieldsToSkip.find(GmshTag) != fieldsToSkip.end()) continue; | |||
PView *view = new PView(GmshTag); | PView *view = new PView(GmshTag); | |||
view->getData()->importLists(&MyField.theirN[ifield][0], &V_recv[ifield][0]); | view->getData()->importLists(&MyField.theirN[ifield][0], | |||
| &V_recv[ifield][0]); | |||
for (int j = 0 ; j < 24 ; j ++){ | for(int j = 0; j < 24; j++) { delete V_recv[ifield][j]; } | |||
delete V_recv[ifield][j]; | ||||
} | ||||
} | } | |||
} | } | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
// Copy a STD Vector (std_vec) to a PETSc VEc (petsc_vec) | // Copy a STD Vector (std_vec) to a PETSc VEc (petsc_vec) | |||
// In fact, copy the local part only of the PETSc Vec | // In fact, copy the local part only of the PETSc Vec | |||
static PetscErrorCode STD_vector_to_PETSc_Vec | static PetscErrorCode | |||
(std::vector<std::vector<std::vector<double> > > std_vec, | STD_vector_to_PETSc_Vec(std::vector<std::vector<std::vector<double> > > std_vec, | |||
Vec petsc_vec, ILSField *Local) | Vec petsc_vec, ILSField *Local) | |||
{ | { | |||
PetscInt nb_view = Local->nb_field; | PetscInt nb_view = Local->nb_field; | |||
for (int cpt_view = 0; cpt_view < nb_view; cpt_view++){ | for(int cpt_view = 0; cpt_view < nb_view; cpt_view++) { | |||
int nb_element = Local->size[cpt_view]; | int nb_element = Local->size[cpt_view]; | |||
std::vector<PetscScalar> val; | std::vector<PetscScalar> val; | |||
std::vector<PetscInt> ix; | std::vector<PetscInt> ix; | |||
if(Current.NbrHar == 2){ | if(Current.NbrHar == 2) { | |||
#if defined(PETSC_USE_COMPLEX) | #if defined(PETSC_USE_COMPLEX) | |||
val.resize(nb_element); | val.resize(nb_element); | |||
ix.resize(nb_element); | ix.resize(nb_element); | |||
#else | #else | |||
val.resize(2*nb_element); | val.resize(2 * nb_element); | |||
ix.resize(2*nb_element); | ix.resize(2 * nb_element); | |||
#endif | #endif | |||
} | } | |||
else{ | else { | |||
val.resize(nb_element); | val.resize(nb_element); | |||
ix.resize(nb_element); | ix.resize(nb_element); | |||
} | } | |||
for (int i = 0 ; i < nb_element ; i++){ | for(int i = 0; i < nb_element; i++) { | |||
if(Current.NbrHar == 2){ | if(Current.NbrHar == 2) { | |||
#if defined(PETSC_USE_COMPLEX) | #if defined(PETSC_USE_COMPLEX) | |||
ix[i] = Local->iStart[cpt_view] + i; | ix[i] = Local->iStart[cpt_view] + i; | |||
val[i] = std_vec[cpt_view][0][i] + PETSC_i*std_vec[cpt_view][1][i]; | val[i] = std_vec[cpt_view][0][i] + PETSC_i * std_vec[cpt_view][1][i]; | |||
#else | #else | |||
ix[2*i] = 2*Local->iStart[cpt_view] + 2*i; | ix[2 * i] = 2 * Local->iStart[cpt_view] + 2 * i; | |||
ix[2*i+1] = 2*Local->iStart[cpt_view] + 2*i+1; | ix[2 * i + 1] = 2 * Local->iStart[cpt_view] + 2 * i + 1; | |||
val[2*i] = std_vec[cpt_view][0][i]; | val[2 * i] = std_vec[cpt_view][0][i]; | |||
val[2*i+1] = std_vec[cpt_view][1][i]; | val[2 * i + 1] = std_vec[cpt_view][1][i]; | |||
#endif | #endif | |||
} | } | |||
else{ | else { | |||
ix[i] = Local->iStart[cpt_view] + i; | ix[i] = Local->iStart[cpt_view] + i; | |||
val[i] = std_vec[cpt_view][0][i]; | val[i] = std_vec[cpt_view][0][i]; | |||
} | } | |||
} | } | |||
if(Current.NbrHar == 2){ | if(Current.NbrHar == 2) { | |||
#if defined(PETSC_USE_COMPLEX) | #if defined(PETSC_USE_COMPLEX) | |||
_try(VecSetValues(petsc_vec, nb_element, &ix[0], &val[0], INSERT_VALUES)); | _try(VecSetValues(petsc_vec, nb_element, &ix[0], &val[0], INSERT_VALUES)); | |||
#else | #else | |||
_try(VecSetValues(petsc_vec, 2*nb_element, &ix[0], &val[0], INSERT_VALUES)); | _try(VecSetValues(petsc_vec, 2 * nb_element, &ix[0], &val[0], | |||
| INSERT_VALUES)); | |||
#endif | #endif | |||
} | } | |||
else{ | else { | |||
_try(VecSetValues(petsc_vec, nb_element, &ix[0], &val[0], INSERT_VALUES)); | _try(VecSetValues(petsc_vec, nb_element, &ix[0], &val[0], INSERT_VALUES)); | |||
} | } | |||
} | } | |||
_try(VecAssemblyBegin(petsc_vec)); | _try(VecAssemblyBegin(petsc_vec)); | |||
_try(VecAssemblyEnd(petsc_vec)); | _try(VecAssemblyEnd(petsc_vec)); | |||
PetscBarrier((PetscObject)petsc_vec); | PetscBarrier((PetscObject)petsc_vec); | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
// Copy Petsc Vec to a std::vector | // Copy Petsc Vec to a std::vector | |||
// Send ONLY THE LOCAL Part of the PETSC VEC ! | // Send ONLY THE LOCAL Part of the PETSC VEC ! | |||
static PetscErrorCode PETSc_Vec_to_STD_Vec | static PetscErrorCode | |||
(Vec petsc_vec, ILSField *Local, | PETSc_Vec_to_STD_Vec(Vec petsc_vec, ILSField *Local, | |||
std::vector<std::vector<std::vector<double> > > *std_vec) | std::vector<std::vector<std::vector<double> > > *std_vec) | |||
{ | { | |||
PetscScalar val; | PetscScalar val; | |||
int nb_view = Local->nb_field; | int nb_view = Local->nb_field; | |||
// initializing std_vec | // initializing std_vec | |||
(*std_vec).resize(Local->nb_field); | (*std_vec).resize(Local->nb_field); | |||
for (int cpt_view = 0 ; cpt_view < nb_view ; cpt_view++){ | for(int cpt_view = 0; cpt_view < nb_view; cpt_view++) { | |||
int nb_elem = Local->size[cpt_view]; | int nb_elem = Local->size[cpt_view]; | |||
if(Current.NbrHar == 2){ | if(Current.NbrHar == 2) { | |||
(*std_vec)[cpt_view].resize(2); | (*std_vec)[cpt_view].resize(2); | |||
(*std_vec)[cpt_view][0].resize(nb_elem); | (*std_vec)[cpt_view][0].resize(nb_elem); | |||
(*std_vec)[cpt_view][1].resize(nb_elem); | (*std_vec)[cpt_view][1].resize(nb_elem); | |||
} | } | |||
else{ | else { | |||
(*std_vec)[cpt_view].resize(1); | (*std_vec)[cpt_view].resize(1); | |||
(*std_vec)[cpt_view][0].resize(nb_elem); | (*std_vec)[cpt_view][0].resize(nb_elem); | |||
} | } | |||
} | } | |||
for (int cpt_view = 0 ; cpt_view < nb_view ; cpt_view++){ | for(int cpt_view = 0; cpt_view < nb_view; cpt_view++) { | |||
int nb_element = Local->size[cpt_view]; | int nb_element = Local->size[cpt_view]; | |||
int iStart = Local->iStart[cpt_view]; | int iStart = Local->iStart[cpt_view]; | |||
for (int j = 0 ; j < nb_element ; j++) { | for(int j = 0; j < nb_element; j++) { | |||
int cpt = iStart + j; | int cpt = iStart + j; | |||
if(Current.NbrHar == 2){ | if(Current.NbrHar == 2) { | |||
#if defined(PETSC_USE_COMPLEX) | #if defined(PETSC_USE_COMPLEX) | |||
_try(VecGetValues(petsc_vec, 1, &cpt, &val)); | _try(VecGetValues(petsc_vec, 1, &cpt, &val)); | |||
(*std_vec)[cpt_view][0][j] = (double)PetscRealPart(val); | (*std_vec)[cpt_view][0][j] = (double)PetscRealPart(val); | |||
(*std_vec)[cpt_view][1][j] = (double)PetscImaginaryPart(val); | (*std_vec)[cpt_view][1][j] = (double)PetscImaginaryPart(val); | |||
#else | #else | |||
int cpt2 = 2*iStart + 2*j; | int cpt2 = 2 * iStart + 2 * j; | |||
_try(VecGetValues(petsc_vec, 1, &cpt2, &val)); | _try(VecGetValues(petsc_vec, 1, &cpt2, &val)); | |||
(*std_vec)[cpt_view][0][j] = (double)(val); | (*std_vec)[cpt_view][0][j] = (double)(val); | |||
int cpt3 = 2*iStart + 2*j+1; | int cpt3 = 2 * iStart + 2 * j + 1; | |||
_try(VecGetValues(petsc_vec, 1, &cpt3, &val)); | _try(VecGetValues(petsc_vec, 1, &cpt3, &val)); | |||
(*std_vec)[cpt_view][1][j] = (double)(val); | (*std_vec)[cpt_view][1][j] = (double)(val); | |||
#endif | #endif | |||
} | } | |||
else{ | else { | |||
_try(VecGetValues(petsc_vec, 1, &cpt, &val)); | _try(VecGetValues(petsc_vec, 1, &cpt, &val)); | |||
(*std_vec)[cpt_view][0][j] = (double)PetscRealPart(val); | (*std_vec)[cpt_view][0][j] = (double)PetscRealPart(val); | |||
} | } | |||
} | } | |||
} | } | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
// Initialize the MatShell Matrix | // Initialize the MatShell Matrix | |||
// Preallocate the memory | // Preallocate the memory | |||
static PetscErrorCode CreateILSMat(ILSMat **shell) | static PetscErrorCode CreateILSMat(ILSMat **shell) | |||
{ | { | |||
ILSMat *newctx; | ILSMat *newctx; | |||
std::vector<PetscInt> vec_indice, vec_size; | std::vector<PetscInt> vec_indice, vec_size; | |||
newctx = (ILSMat*)malloc(sizeof(ILSMat)); | newctx = (ILSMat *)malloc(sizeof(ILSMat)); | |||
newctx->MyField = NULL; | newctx->MyField = NULL; | |||
newctx->AllField = NULL; | newctx->AllField = NULL; | |||
newctx->LinearSystemType = NULL; | newctx->LinearSystemType = NULL; | |||
newctx->Resolution_P = NULL; | newctx->Resolution_P = NULL; | |||
newctx->Operation_P = NULL; | newctx->Operation_P = NULL; | |||
newctx->DofData_P0 = NULL; | newctx->DofData_P0 = NULL; | |||
newctx->GeoData_P0 = NULL; | newctx->GeoData_P0 = NULL; | |||
*shell = newctx; | *shell = newctx; | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
skipping to change at line 690 | skipping to change at line 700 | |||
ILSField MyField, AllField; | ILSField MyField, AllField; | |||
ILSMat *ctx; | ILSMat *ctx; | |||
char *LinearSystemType; | char *LinearSystemType; | |||
#ifdef TIMER | #ifdef TIMER | |||
double tBcast_start, tBcast_end; | double tBcast_start, tBcast_end; | |||
double tTreatment_start, tTreatment_end; | double tTreatment_start, tTreatment_end; | |||
double t_start = MPI_Wtime(), t_end; | double t_start = MPI_Wtime(), t_end; | |||
#endif | #endif | |||
_try(MatShellGetContext(A, (void**)&ctx)); | _try(MatShellGetContext(A, (void **)&ctx)); | |||
LinearSystemType = ctx->LinearSystemType; | LinearSystemType = ctx->LinearSystemType; | |||
// convert X to a std vector | // convert X to a std vector | |||
_try(PETSc_Vec_to_STD_Vec(X, ctx->MyField, &std_vec)); | _try(PETSc_Vec_to_STD_Vec(X, ctx->MyField, &std_vec)); | |||
// Update PViews | // Update PViews | |||
for (int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++){ | for(int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++) { | |||
PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | |||
view->getData()->fromVector(std_vec[cpt_view]); | view->getData()->fromVector(std_vec[cpt_view]); | |||
} | } | |||
// PVIEW BCAST | // PVIEW BCAST | |||
#ifdef TIMER | #ifdef TIMER | |||
tBcast_start = MPI_Wtime(); | tBcast_start = MPI_Wtime(); | |||
#endif | #endif | |||
PViewBCast(*(ctx->MyField), *(ctx->AllField)); | PViewBCast(*(ctx->MyField), *(ctx->AllField)); | |||
#ifdef TIMER | #ifdef TIMER | |||
tBcast_end = MPI_Wtime(); | tBcast_end = MPI_Wtime(); | |||
#endif | #endif | |||
// Getdp resolution (contained in the matrix context) | // Getdp resolution (contained in the matrix context) | |||
// Barrier to ensure that every process have the good data in RAM | // Barrier to ensure that every process have the good data in RAM | |||
#ifdef TIMER | #ifdef TIMER | |||
tTreatment_start = MPI_Wtime(); | tTreatment_start = MPI_Wtime(); | |||
#endif | #endif | |||
Treatment_Operation(ctx->Resolution_P, | Treatment_Operation( | |||
ctx->Operation_P->Case.IterativeLinearSolver.Operations_Ax, | ctx->Resolution_P, | |||
ctx->DofData_P0, | ctx->Operation_P->Case.IterativeLinearSolver.Operations_Ax, ctx->DofData_P0, | |||
ctx->GeoData_P0, | ctx->GeoData_P0, NULL, NULL); | |||
NULL, NULL); | ||||
#ifdef TIMER | #ifdef TIMER | |||
tTreatment_end = MPI_Wtime(); | tTreatment_end = MPI_Wtime(); | |||
#endif | #endif | |||
// Extract the (std) vector from the (new) .pos files | // Extract the (std) vector from the (new) .pos files | |||
// This assumes that every process reads every .pos files | // This assumes that every process reads every .pos files | |||
for(int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++) { | for(int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++) { | |||
PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | |||
view->getData()->toVector(std_vec[cpt_view]); | view->getData()->toVector(std_vec[cpt_view]); | |||
} | } | |||
// Convert the obtained vector to a Petsc Vec | // Convert the obtained vector to a Petsc Vec | |||
_try(STD_vector_to_PETSc_Vec(std_vec, Y, ctx->MyField)); | _try(STD_vector_to_PETSc_Vec(std_vec, Y, ctx->MyField)); | |||
// Set Y = X - Y | // Set Y = X - Y | |||
if(!strcmp(LinearSystemType,"I-A")) | if(!strcmp(LinearSystemType, "I-A")) | |||
_try(VecAYPX(Y, -1.,X)); | _try(VecAYPX(Y, -1., X)); | |||
else if(!strcmp(LinearSystemType,"I+A")) | else if(!strcmp(LinearSystemType, "I+A")) | |||
_try(VecAYPX(Y, 1.,X)); | _try(VecAYPX(Y, 1., X)); | |||
#ifdef TIMER | #ifdef TIMER | |||
// time computation | // time computation | |||
t_end = MPI_Wtime(); | t_end = MPI_Wtime(); | |||
double t_MatMult, t_Bcast, t_Treatment; | double t_MatMult, t_Bcast, t_Treatment; | |||
t_MatMult = t_end - t_start; | t_MatMult = t_end - t_start; | |||
t_Bcast = tBcast_end - tBcast_start; | t_Bcast = tBcast_end - tBcast_start; | |||
t_Treatment = tTreatment_end - tTreatment_start; | t_Treatment = tTreatment_end - tTreatment_start; | |||
ctx->MyField->TimeTreatment.push_back(t_Treatment); | ctx->MyField->TimeTreatment.push_back(t_Treatment); | |||
ctx->MyField->TimeBcast.push_back(t_Bcast); | ctx->MyField->TimeBcast.push_back(t_Bcast); | |||
ctx->MyField->TimeIt.push_back(t_MatMult); | ctx->MyField->TimeIt.push_back(t_MatMult); | |||
Message::Info(3, "Processus %d ended iteration in %g seconds with %g for commu | Message::Info( | |||
nication", | 3, "Processus %d ended iteration in %g seconds with %g for communication", | |||
Message::GetCommRank(), t_MatMult, t_Bcast); | Message::GetCommRank(), t_MatMult, t_Bcast); | |||
#endif | #endif | |||
_try(PetscBarrier((PetscObject)PETSC_NULL)); | _try(PetscBarrier((PetscObject)PETSC_NULL)); | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
// Build the iteration matrix of the Matrix-free vector-product. | // Build the iteration matrix of the Matrix-free vector-product. | |||
// Used to, e.g., study eigenvalues of the operators | // Used to, e.g., study eigenvalues of the operators | |||
static PetscErrorCode BuildIterationMatrix(Mat A, Mat *IterationMatrix) | static PetscErrorCode BuildIterationMatrix(Mat A, Mat *IterationMatrix) | |||
{ | { | |||
const PetscScalar one = 1., zero = 0.; | const PetscScalar one = 1., zero = 0.; | |||
PetscInt n_proc, m,n, m_loc, n_loc; | PetscInt n_proc, m, n, m_loc, n_loc; | |||
PetscInt m_start, m_end, vec_m_start, vec_m_end; | PetscInt m_start, m_end, vec_m_start, vec_m_end; | |||
_try(MPI_Comm_size(PETSC_COMM_WORLD, &n_proc)); | _try(MPI_Comm_size(PETSC_COMM_WORLD, &n_proc)); | |||
_try(MatGetSize(A, &m, &n)); | _try(MatGetSize(A, &m, &n)); | |||
_try(MatCreate(PETSC_COMM_WORLD, IterationMatrix)); | _try(MatCreate(PETSC_COMM_WORLD, IterationMatrix)); | |||
_try(MatSetSizes((*IterationMatrix), PETSC_DECIDE, PETSC_DECIDE, m, n)); | _try(MatSetSizes((*IterationMatrix), PETSC_DECIDE, PETSC_DECIDE, m, n)); | |||
_try(MatSetType((*IterationMatrix), MATMPIAIJ)); | _try(MatSetType((*IterationMatrix), MATMPIAIJ)); | |||
_try(MatSetFromOptions((*IterationMatrix))); | _try(MatSetFromOptions((*IterationMatrix))); | |||
_try(MatSetUp((*IterationMatrix))); | _try(MatSetUp((*IterationMatrix))); | |||
_try(MatGetOwnershipRange((*IterationMatrix), &m_start, &m_end)); | _try(MatGetOwnershipRange((*IterationMatrix), &m_start, &m_end)); | |||
_try(MatGetLocalSize((*IterationMatrix), &m_loc, &n_loc)); | _try(MatGetLocalSize((*IterationMatrix), &m_loc, &n_loc)); | |||
_try(MatMPIAIJSetPreallocation((*IterationMatrix), m_loc, PETSC_NULL, | _try(MatMPIAIJSetPreallocation((*IterationMatrix), m_loc, PETSC_NULL, | |||
n-m_loc, PETSC_NULL)); | n - m_loc, PETSC_NULL)); | |||
std::vector<PetscInt> ix(m); | std::vector<PetscInt> ix(m); | |||
for(PetscInt i = 0; i<m; i++) | for(PetscInt i = 0; i < m; i++) ix[i] = m_start + i; | |||
ix[i] = m_start + i; | ||||
Vec ej, Aej; | Vec ej, Aej; | |||
_try(VecCreateSeq(PETSC_COMM_SELF, m, &ej)); | _try(VecCreateSeq(PETSC_COMM_SELF, m, &ej)); | |||
_try(VecDuplicate(ej, &Aej)); | _try(VecDuplicate(ej, &Aej)); | |||
_try(VecGetOwnershipRange(ej, &vec_m_start, &vec_m_end)); | _try(VecGetOwnershipRange(ej, &vec_m_start, &vec_m_end)); | |||
for(int cpt=0;cpt<n;cpt++){ | for(int cpt = 0; cpt < n; cpt++) { | |||
Message::Info(3, "Column number %d over %d", cpt, n-1); | Message::Info(3, "Column number %d over %d", cpt, n - 1); | |||
std::vector<PetscScalar> vec_temp(n); | std::vector<PetscScalar> vec_temp(n); | |||
_try(VecSet(ej, zero)); | _try(VecSet(ej, zero)); | |||
if(cpt >= vec_m_start && cpt<vec_m_end) | if(cpt >= vec_m_start && cpt < vec_m_end) | |||
_try(VecSetValues(ej, 1., &cpt, &one, INSERT_VALUES)); | _try(VecSetValues(ej, 1., &cpt, &one, INSERT_VALUES)); | |||
_try(VecAssemblyBegin(ej)); | _try(VecAssemblyBegin(ej)); | |||
_try(VecAssemblyEnd(ej)); | _try(VecAssemblyEnd(ej)); | |||
_try(MatMultILSMat(A, ej, Aej)); | _try(MatMultILSMat(A, ej, Aej)); | |||
// storing it in a Matrix | // storing it in a Matrix | |||
_try(VecGetValues(Aej, m_loc, &ix[0], &vec_temp[0])); | _try(VecGetValues(Aej, m_loc, &ix[0], &vec_temp[0])); | |||
_try(MatSetValues((*IterationMatrix), m_loc, &ix[0], 1, &cpt, &vec_temp[0], | _try(MatSetValues((*IterationMatrix), m_loc, &ix[0], 1, &cpt, &vec_temp[0], | |||
INSERT_VALUES)); | INSERT_VALUES)); | |||
if(cpt%100 == 0){ // flushing | if(cpt % 100 == 0) { // flushing | |||
_try(MatAssemblyBegin((*IterationMatrix), MAT_FLUSH_ASSEMBLY)); | _try(MatAssemblyBegin((*IterationMatrix), MAT_FLUSH_ASSEMBLY)); | |||
_try(MatAssemblyEnd((*IterationMatrix), MAT_FLUSH_ASSEMBLY)); | _try(MatAssemblyEnd((*IterationMatrix), MAT_FLUSH_ASSEMBLY)); | |||
} | } | |||
} | } | |||
_try(MatAssemblyBegin((*IterationMatrix), MAT_FINAL_ASSEMBLY)); | _try(MatAssemblyBegin((*IterationMatrix), MAT_FINAL_ASSEMBLY)); | |||
_try(MatAssemblyEnd((*IterationMatrix), MAT_FINAL_ASSEMBLY)); | _try(MatAssemblyEnd((*IterationMatrix), MAT_FINAL_ASSEMBLY)); | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
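Aside: BuildIterationMatrix above materializes the matrix-free shell operator by applying it to each canonical basis vector e_j (MatMultILSMat(A, ej, Aej)) and storing the result as column j of an explicit MPIAIJ matrix, flushing the assembly every 100 columns. A minimal, self-contained sketch of the same column-by-column pattern is shown below; it is illustrative only (sequential setup, plain MatMult, and the name DenseFromOperator are assumptions, not GetDP code).

// Illustrative sketch (assumes <petscksp.h>): densify a linear operator
// column by column by applying it to the canonical basis vectors.
static PetscErrorCode DenseFromOperator(Mat A, Mat *Dense)
{
  PetscInt m, n;
  PetscErrorCode ierr;
  ierr = MatGetSize(A, &m, &n); CHKERRQ(ierr);
  ierr = MatCreateSeqDense(PETSC_COMM_SELF, m, n, NULL, Dense); CHKERRQ(ierr);
  Vec ej, Aej;
  ierr = VecCreateSeq(PETSC_COMM_SELF, n, &ej); CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF, m, &Aej); CHKERRQ(ierr);
  for(PetscInt j = 0; j < n; j++) {
    PetscScalar one = 1.0;
    ierr = VecSet(ej, 0.0); CHKERRQ(ierr);
    ierr = VecSetValues(ej, 1, &j, &one, INSERT_VALUES); CHKERRQ(ierr);
    ierr = VecAssemblyBegin(ej); CHKERRQ(ierr);
    ierr = VecAssemblyEnd(ej); CHKERRQ(ierr);
    ierr = MatMult(A, ej, Aej); CHKERRQ(ierr); // column j = A * e_j
    const PetscScalar *col;
    ierr = VecGetArrayRead(Aej, &col); CHKERRQ(ierr);
    for(PetscInt i = 0; i < m; i++) {
      ierr = MatSetValue(*Dense, i, j, col[i], INSERT_VALUES); CHKERRQ(ierr);
    }
    ierr = VecRestoreArrayRead(Aej, &col); CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*Dense, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*Dense, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
  ierr = VecDestroy(&ej); CHKERRQ(ierr);
  ierr = VecDestroy(&Aej); CHKERRQ(ierr);
  return 0;
}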
// Print Iteration Matrix into file_IterationMatrix.m (matlab reading) | // Print Iteration Matrix into file_IterationMatrix.m (matlab reading) | |||
static PetscErrorCode PrintMatrix(Mat A, const char* filename, const char* varname) | static PetscErrorCode PrintMatrix(Mat A, const char *filename, | |||
 | const char *varname) | |||
{ | { | |||
// This function is copy/paste of function LinAlg_PrintMatrix function located | // This function is copy/paste of function LinAlg_PrintMatrix function located | |||
// in Kernel/LinAlg_PETSC.cpp | // in Kernel/LinAlg_PETSC.cpp | |||
std::string tmp(filename); | std::string tmp(filename); | |||
PetscInt m,n; | PetscInt m, n; | |||
_try(PetscObjectSetName((PetscObject)A, varname)); | _try(PetscObjectSetName((PetscObject)A, varname)); | |||
// ASCII (if the matrix is not too large) | // ASCII (if the matrix is not too large) | |||
_try(MatGetSize(A, &m, &n)); | _try(MatGetSize(A, &m, &n)); | |||
PetscViewer viewer; | PetscViewer viewer; | |||
_try(PetscViewerASCIIOpen(PETSC_COMM_WORLD, filename, &viewer)); | _try(PetscViewerASCIIOpen(PETSC_COMM_WORLD, filename, &viewer)); | |||
_try(PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_MATLAB)); | _try(PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_MATLAB)); | |||
_try(MatView(A, viewer)); | _try(MatView(A, viewer)); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(PetscViewerDestroy(&viewer)); | _try(PetscViewerDestroy(&viewer)); | |||
#else | #else | |||
_try(PetscViewerDestroy(viewer)); | _try(PetscViewerDestroy(viewer)); | |||
#endif | #endif | |||
// BINARY | // BINARY | |||
// Add the petscfolder/bin/matlab path to your matlab paths and | // Add the petscfolder/bin/matlab path to your matlab paths and | |||
// type the following command in matlab, for real arithmetic : | // type the following command in matlab, for real arithmetic : | |||
// A = PetscBinaryRead(filename) ; | // A = PetscBinaryRead(filename) ; | |||
// and for complex arithmetic : | // and for complex arithmetic : | |||
// A = PetscBinaryRead(filename , 'complex', true) ; | // A = PetscBinaryRead(filename , 'complex', true) ; | |||
PetscViewer viewer_bin; | PetscViewer viewer_bin; | |||
_try(PetscViewerBinaryOpen(PETSC_COMM_WORLD, (tmp + ".bin").c_str(), | _try(PetscViewerBinaryOpen(PETSC_COMM_WORLD, (tmp + ".bin").c_str(), | |||
FILE_MODE_WRITE, &viewer_bin)); | FILE_MODE_WRITE, &viewer_bin)); | |||
_try(PetscViewerSetFormat(viewer_bin, PETSC_VIEWER_DEFAULT)); | _try(PetscViewerSetFormat(viewer_bin, PETSC_VIEWER_DEFAULT)); | |||
_try(MatView(A, viewer_bin)); | _try(MatView(A, viewer_bin)); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(PetscViewerDestroy(&viewer_bin)); | _try(PetscViewerDestroy(&viewer_bin)); | |||
#else | #else | |||
_try(PetscViewerDestroy(viewer_bin)); | _try(PetscViewerDestroy(viewer_bin)); | |||
#endif | #endif | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
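Aside: PrintMatrix writes the operator twice, once as a MATLAB-readable ASCII file and once as a PETSc binary file that MATLAB can load through PetscBinaryRead, as the comments note. The version guards around PetscViewerDestroy reflect the PETSc 3.2 change from passing the viewer by value to passing its address. A hedged sketch of the same viewer pattern for a recent PETSc (3.7 or later, where PetscViewerPushFormat replaces PetscViewerSetFormat) follows; DumpMatrixForMatlab and the file names are illustrative, not part of this file.

// Illustrative sketch (assumes <petscksp.h>, PETSc >= 3.7).
static PetscErrorCode DumpMatrixForMatlab(Mat A, const char *basename)
{
  PetscViewer ascii, bin;
  char name[256];
  PetscErrorCode ierr;
  ierr = PetscSNPrintf(name, sizeof(name), "%s.m", basename); CHKERRQ(ierr);
  ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD, name, &ascii); CHKERRQ(ierr);
  ierr = PetscViewerPushFormat(ascii, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
  ierr = MatView(A, ascii); CHKERRQ(ierr);
  ierr = PetscViewerPopFormat(ascii); CHKERRQ(ierr);
  ierr = PetscViewerDestroy(&ascii); CHKERRQ(ierr);
  ierr = PetscSNPrintf(name, sizeof(name), "%s.bin", basename); CHKERRQ(ierr);
  ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_WRITE, &bin); CHKERRQ(ierr);
  ierr = MatView(A, bin); CHKERRQ(ierr); // load in MATLAB with PetscBinaryRead
  ierr = PetscViewerDestroy(&bin); CHKERRQ(ierr);
  return 0;
}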
// Print a SEQUENTIAL Petsc Vec into a Matlab File | // Print a SEQUENTIAL Petsc Vec into a Matlab File | |||
static PetscErrorCode PrintVecSeq(Vec b, const char* filename, const char* varname) | static PetscErrorCode PrintVecSeq(Vec b, const char *filename, | |||
 | const char *varname) | |||
{ | { | |||
std::string tmp(filename); | std::string tmp(filename); | |||
PetscViewer viewer, viewer_bin; | PetscViewer viewer, viewer_bin; | |||
_try(PetscObjectSetName((PetscObject)b, varname)); | _try(PetscObjectSetName((PetscObject)b, varname)); | |||
_try(PetscViewerASCIIOpen(PETSC_COMM_SELF, filename, &viewer)); | _try(PetscViewerASCIIOpen(PETSC_COMM_SELF, filename, &viewer)); | |||
_try(PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_MATLAB)); | _try(PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_MATLAB)); | |||
// see PrintMat function for the how-to use it | // see PrintMat function for the how-to use it | |||
_try(PetscViewerBinaryOpen(PETSC_COMM_SELF, (tmp + ".bin").c_str(), | _try(PetscViewerBinaryOpen(PETSC_COMM_SELF, (tmp + ".bin").c_str(), | |||
FILE_MODE_WRITE, &viewer_bin)); | FILE_MODE_WRITE, &viewer_bin)); | |||
_try(PetscViewerSetFormat(viewer_bin, PETSC_VIEWER_DEFAULT)); | _try(PetscViewerSetFormat(viewer_bin, PETSC_VIEWER_DEFAULT)); | |||
VecView(b, viewer); | VecView(b, viewer); | |||
VecView(b, viewer_bin); | VecView(b, viewer_bin); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(PetscViewerDestroy(&viewer)); | _try(PetscViewerDestroy(&viewer)); | |||
_try(PetscViewerDestroy(&viewer_bin)); | _try(PetscViewerDestroy(&viewer_bin)); | |||
#else | #else | |||
_try(PetscViewerDestroy(viewer)); | _try(PetscViewerDestroy(viewer)); | |||
_try(PetscViewerDestroy(viewer_bin)); | _try(PetscViewerDestroy(viewer_bin)); | |||
#endif | #endif | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
// Print a Petsc Vec into a Matlab File - FIXME: to be changed! | // Print a Petsc Vec into a Matlab File - FIXME: to be changed! | |||
static PetscErrorCode PrintVec(Vec b, const char* filename, const char* varname) | static PetscErrorCode PrintVec(Vec b, const char *filename, const char *varname) | |||
{ | { | |||
// This function is copy/paste of function LinAlg_PrintMatrix function | // This function is copy/paste of function LinAlg_PrintMatrix function | |||
// located in Kernel/LinAlg_PETSC.cpp | // located in Kernel/LinAlg_PETSC.cpp | |||
#if (PETSC_VERSION_MAJOR == 0) || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 4)) | #if(PETSC_VERSION_MAJOR == 0) || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 4)) | |||
const char *type = ""; | const char *type = ""; | |||
#else | #else | |||
const VecType type; | const VecType type; | |||
#endif | #endif | |||
_try(VecGetType(b, &type)); | _try(VecGetType(b, &type)); | |||
if(!strcmp(type, "seq")){ // AND NUM_PROC > 1 ! | if(!strcmp(type, "seq")) { // AND NUM_PROC > 1 ! | |||
_try(PrintVecSeq(b, filename, varname)); | _try(PrintVecSeq(b, filename, varname)); | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
PetscViewer viewer, viewer_bin; | PetscViewer viewer, viewer_bin; | |||
std::string tmp(filename); | std::string tmp(filename); | |||
_try(PetscObjectSetName((PetscObject)b, varname)); | _try(PetscObjectSetName((PetscObject)b, varname)); | |||
// ASCII | // ASCII | |||
_try(PetscViewerASCIIOpen(PETSC_COMM_WORLD, filename, &viewer)); | _try(PetscViewerASCIIOpen(PETSC_COMM_WORLD, filename, &viewer)); | |||
_try(PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_MATLAB)); | _try(PetscViewerSetFormat(viewer, PETSC_VIEWER_ASCII_MATLAB)); | |||
// see PrintMat function for the how-to use it | // see PrintMat function for the how-to use it | |||
_try(PetscViewerBinaryOpen(PETSC_COMM_WORLD, (tmp + ".bin").c_str(), | _try(PetscViewerBinaryOpen(PETSC_COMM_WORLD, (tmp + ".bin").c_str(), | |||
FILE_MODE_WRITE, &viewer_bin)); | FILE_MODE_WRITE, &viewer_bin)); | |||
_try(PetscViewerSetFormat(viewer_bin, PETSC_VIEWER_DEFAULT)); | _try(PetscViewerSetFormat(viewer_bin, PETSC_VIEWER_DEFAULT)); | |||
VecView(b, viewer); | VecView(b, viewer); | |||
VecView(b, viewer_bin); | VecView(b, viewer_bin); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(PetscViewerDestroy(&viewer)); | _try(PetscViewerDestroy(&viewer)); | |||
_try(PetscViewerDestroy(&viewer_bin)); | _try(PetscViewerDestroy(&viewer_bin)); | |||
#else | #else | |||
_try(PetscViewerDestroy(viewer)); | _try(PetscViewerDestroy(viewer)); | |||
_try(PetscViewerDestroy(viewer_bin)); | _try(PetscViewerDestroy(viewer_bin)); | |||
#endif | #endif | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
static PetscErrorCode Jacobi_Solver(Mat A, Vec X, Vec B, double Tol, int MaxIter) | static PetscErrorCode Jacobi_Solver(Mat A, Vec X, Vec B, double Tol, | |||
 | int MaxIter) | |||
{ | { | |||
Vec X_old, W; | Vec X_old, W; | |||
double residu, residuInit; | double residu, residuInit; | |||
_try(VecSet(X, 0.)); | _try(VecSet(X, 0.)); | |||
_try(VecDuplicate(X, &X_old)); | _try(VecDuplicate(X, &X_old)); | |||
_try(VecDuplicate(X, &W)); | _try(VecDuplicate(X, &W)); | |||
_try(VecCopy(X, W)); | _try(VecCopy(X, W)); | |||
_try(VecNorm(B, NORM_2, &residuInit)); | _try(VecNorm(B, NORM_2, &residuInit)); | |||
Message::Info(3, "Jacobi initial residual %g", residuInit); | Message::Info(3, "Jacobi initial residual %g", residuInit); | |||
Current.Iteration = 0; | Current.Iteration = 0; | |||
Current.KSPIteration = 0; | Current.KSPIteration = 0; | |||
Current.Residual = residuInit; | Current.Residual = residuInit; | |||
Current.KSPResidual = residuInit; | Current.KSPResidual = residuInit; | |||
for (int j=1; j < MaxIter; j++){ | for(int j = 1; j < MaxIter; j++) { | |||
_try(VecCopy(X, X_old)); | _try(VecCopy(X, X_old)); | |||
_try(MatMultILSMat(A, X_old, X)); | _try(MatMultILSMat(A, X_old, X)); | |||
_try(VecAYPX(X, 1.,B)); // X = X + B | _try(VecAYPX(X, 1., B)); // X = X + B | |||
//convergence test | // convergence test | |||
_try(VecWAXPY(W, -1.,X_old, X)); //W = X-X_old | _try(VecWAXPY(W, -1., X_old, X)); // W = X-X_old | |||
_try(VecNorm(W, NORM_2, &residu)); | _try(VecNorm(W, NORM_2, &residu)); | |||
Message::Info(3, "Jacobi iteration %d residual %g", j, residu); | Message::Info(3, "Jacobi iteration %d residual %g", j, residu); | |||
Current.Iteration = j; | Current.Iteration = j; | |||
Current.KSPIteration = j; | Current.KSPIteration = j; | |||
Current.Residual = residu; | Current.Residual = residu; | |||
Current.KSPResidual = residu; | Current.KSPResidual = residu; | |||
if(residu/residuInit < Tol) break; | if(residu / residuInit < Tol) break; | |||
} | } | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
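Aside: Jacobi_Solver implements the fixed-point iteration X_{k+1} = A X_k + B for the system (I - A) X = B, stopping once ||X_{k+1} - X_k|| / ||B|| < Tol or MaxIter sweeps have been performed; this is why the caller later resets LinearSystemType to "A", so that MatMultILSMat applies A itself. A compact sketch of the same loop with plain PETSc calls, MatMult standing in for the matrix-free MatMultILSMat, is shown below; FixedPointSolve is an illustrative name.

// Illustrative sketch (assumes <petscksp.h>): fixed-point iteration for (I - A) X = B.
static PetscErrorCode FixedPointSolve(Mat A, Vec X, Vec B, double tol, int maxit)
{
  Vec Xold, W;
  PetscReal r0, r;
  PetscErrorCode ierr;
  ierr = VecSet(X, 0.0); CHKERRQ(ierr);
  ierr = VecDuplicate(X, &Xold); CHKERRQ(ierr);
  ierr = VecDuplicate(X, &W); CHKERRQ(ierr);
  ierr = VecNorm(B, NORM_2, &r0); CHKERRQ(ierr);       // reference residual ||B||
  for(int k = 1; k < maxit; k++) {
    ierr = VecCopy(X, Xold); CHKERRQ(ierr);
    ierr = MatMult(A, Xold, X); CHKERRQ(ierr);         // X = A * X_old
    ierr = VecAYPX(X, 1.0, B); CHKERRQ(ierr);          // X = X + B
    ierr = VecWAXPY(W, -1.0, Xold, X); CHKERRQ(ierr);  // W = X - X_old
    ierr = VecNorm(W, NORM_2, &r); CHKERRQ(ierr);
    if(r / r0 < tol) break;                            // relative update small enough
  }
  ierr = VecDestroy(&Xold); CHKERRQ(ierr);
  ierr = VecDestroy(&W); CHKERRQ(ierr);
  return 0;
}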
// matrix-free preconditioning | // matrix-free preconditioning | |||
// Matrix-vector product for the preconditioning. Quite a copy/paste of MatMultILSMat | // Matrix-vector product for the preconditioning. Quite a copy/paste of | |||
 | // MatMultILSMat | |||
static PetscErrorCode MatMultPC(PC pc, Vec X, Vec Y) | static PetscErrorCode MatMultPC(PC pc, Vec X, Vec Y) | |||
{ | { | |||
std::vector<std::vector<std::vector<double> > > std_vec; | std::vector<std::vector<std::vector<double> > > std_vec; | |||
ILSField MyField, AllField; | ILSField MyField, AllField; | |||
ILSMat *ctx; | ILSMat *ctx; | |||
_try(PCShellGetContext(pc, (void**)&ctx)); | _try(PCShellGetContext(pc, (void **)&ctx)); | |||
//convert X to a std vector | // convert X to a std vector | |||
_try(PETSc_Vec_to_STD_Vec(X, ctx->MyField, &std_vec)); | _try(PETSc_Vec_to_STD_Vec(X, ctx->MyField, &std_vec)); | |||
// Update PViews | // Update PViews | |||
for (int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++){ | for(int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++) { | |||
PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | |||
view->getData()->fromVector(std_vec[cpt_view]); | view->getData()->fromVector(std_vec[cpt_view]); | |||
} | } | |||
// PVIEW BCAST ! | // PVIEW BCAST ! | |||
PViewBCast(*(ctx->MyField), *(ctx->AllField)); | PViewBCast(*(ctx->MyField), *(ctx->AllField)); | |||
// Getdp resolution (contained in the matrix context) | // Getdp resolution (contained in the matrix context) | |||
Treatment_Operation(ctx->Resolution_P, | Treatment_Operation( | |||
ctx->Operation_P->Case.IterativeLinearSolver.Operations_Mx, | ctx->Resolution_P, | |||
ctx->DofData_P0, | ctx->Operation_P->Case.IterativeLinearSolver.Operations_Mx, ctx->DofData_P0, | |||
ctx->GeoData_P0, | ctx->GeoData_P0, NULL, NULL); | |||
NULL, NULL); | ||||
// Extract the (std) vector from the (new) .pos files | // Extract the (std) vector from the (new) .pos files | |||
// This assumes that every process reads every .pos files | // This assumes that every process reads every .pos files | |||
for(int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++) { | for(int cpt_view = 0; cpt_view < ctx->MyField->nb_field; cpt_view++) { | |||
PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | PView *view = GetViewByTag(ctx->MyField->GmshTag[cpt_view]); | |||
view->getData()->toVector(std_vec[cpt_view]); | view->getData()->toVector(std_vec[cpt_view]); | |||
} | } | |||
//Convert the obtained vector to a Petsc Vec | // Convert the obtained vector to a Petsc Vec | |||
_try(STD_vector_to_PETSc_Vec(std_vec, Y, ctx->MyField)); | _try(STD_vector_to_PETSc_Vec(std_vec, Y, ctx->MyField)); | |||
_try(PetscBarrier((PetscObject)PETSC_NULL)); | _try(PetscBarrier((PetscObject)PETSC_NULL)); | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
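Aside: MatMultPC is the apply callback of a shell preconditioner: it retrieves the ILSMat context stored in the PC, converts the incoming PETSc Vec into the Gmsh PView fields, runs the user-supplied preconditioning operations through Treatment_Operation, and converts the result back into a Vec. The generic PETSc pattern it relies on is sketched below with placeholder names (MyPCCtx, MyPCApply); the identity apply is only a stand-in for the real resolution.

// Illustrative sketch (assumes <petscksp.h>) of a shell preconditioner.
typedef struct { int placeholder; } MyPCCtx;

static PetscErrorCode MyPCApply(PC pc, Vec x, Vec y)
{
  MyPCCtx *ctx;
  PetscErrorCode ierr;
  ierr = PCShellGetContext(pc, (void **)&ctx); CHKERRQ(ierr);
  ierr = VecCopy(x, y); CHKERRQ(ierr); // identity: stand-in for the real apply
  return 0;
}

// Typical registration, given an existing KSP `ksp` and a context `myctx`:
//   PC pc;
//   KSPGetPC(ksp, &pc);
//   PCSetType(pc, PCSHELL);
//   PCShellSetContext(pc, &myctx);
//   PCShellSetApply(pc, MyPCApply);
//   KSPSetPCSide(ksp, PC_RIGHT); // or PC_LEFT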
static int KspMonitor(KSP ksp, PetscInt it, PetscReal rnorm, void *mctx) | static int KspMonitor(KSP ksp, PetscInt it, PetscReal rnorm, void *mctx) | |||
{ | { | |||
Message::Cpu(3, false, true, true, true, false, "%3ld KSP Residual norm %14.12e", | Message::Cpu(3, false, true, true, true, false, | |||
(long)it, rnorm); | "%3ld KSP Residual norm %14.12e", (long)it, rnorm); | |||
Current.KSPIteration = it; | Current.KSPIteration = it; | |||
Current.KSPResidual = rnorm; | Current.KSPResidual = rnorm; | |||
return 0; | return 0; | |||
} | } | |||
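Aside: KspMonitor is registered with KSPMonitorSet in the KSP setup below, so PETSc invokes it at every Krylov iteration and the iteration count and residual norm are mirrored into Current.KSPIteration and Current.KSPResidual. A minimal stand-alone monitor, with MyMonitor as an illustrative name, could look like this:

// Illustrative sketch (assumes <petscksp.h>): a bare-bones KSP monitor.
static PetscErrorCode MyMonitor(KSP ksp, PetscInt it, PetscReal rnorm, void *mctx)
{
  PetscPrintf(PETSC_COMM_WORLD, "%3d KSP Residual norm %14.12e\n",
              (int)it, (double)rnorm);
  return 0;
}
// registration on an existing KSP: KSPMonitorSet(ksp, MyMonitor, NULL, NULL);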
int Operation_IterativeLinearSolver(struct Resolution *Resolution_P, | int Operation_IterativeLinearSolver(struct Resolution *Resolution_P, | |||
struct Operation *Operation_P, | struct Operation *Operation_P, | |||
struct DofData *DofData_P0, | struct DofData *DofData_P0, | |||
struct GeoData *GeoData_P0) | struct GeoData *GeoData_P0) | |||
{ | { | |||
int mpi_comm_size = Message::GetCommSize(); | int mpi_comm_size = Message::GetCommSize(); | |||
int mpi_comm_rank = Message::GetCommRank(); | int mpi_comm_rank = Message::GetCommRank(); | |||
ILSMat *ctx, *ctx_pc; // Matrix Shell context and PC context | ILSMat *ctx, *ctx_pc; // Matrix Shell context and PC context | |||
Mat A; | Mat A; | |||
KSP ksp; | KSP ksp; | |||
std::string Solver; | std::string Solver; | |||
int MaxIter, Restart; | int MaxIter, Restart; | |||
double Tol; | double Tol; | |||
std::vector<std::vector<std::vector<double> > > B_std; // rhs (std version) | std::vector<std::vector<std::vector<double> > > B_std; // rhs (std version) | |||
Vec B, X; // rhs and Solution | Vec B, X; // rhs and Solution | |||
PC pc; | PC pc; | |||
MPI_Comm ILSComm = PETSC_COMM_WORLD; // by default, KSP is launched in parallel | MPI_Comm ILSComm = | |||
 | PETSC_COMM_WORLD; // by default, KSP is launched in parallel | |||
char *LinearSystemType; | char *LinearSystemType; | |||
ILSField MyField, AllField; | ILSField MyField, AllField; | |||
#if defined(TIMER) | #if defined(TIMER) | |||
double time_total = 0.; | double time_total = 0.; | |||
double time_start = MPI_Wtime(); | double time_start = MPI_Wtime(); | |||
#endif | #endif | |||
// Initializing | // Initializing | |||
MPI_Barrier(PETSC_COMM_WORLD); | MPI_Barrier(PETSC_COMM_WORLD); | |||
Message::Info("Initializing Iterative Linear Solver"); | Message::Info("Initializing Iterative Linear Solver"); | |||
InitData(&MyField, &AllField, Operation_P, &B_std); | InitData(&MyField, &AllField, Operation_P, &B_std); | |||
// Print Information | // Print Information | |||
Tol = Operation_P->Case.IterativeLinearSolver.Tolerance; | Tol = Operation_P->Case.IterativeLinearSolver.Tolerance; | |||
MaxIter = Operation_P->Case.IterativeLinearSolver.MaxIter; | MaxIter = Operation_P->Case.IterativeLinearSolver.MaxIter; | |||
Restart = Operation_P->Case.IterativeLinearSolver.Restart; | Restart = Operation_P->Case.IterativeLinearSolver.Restart; | |||
Solver = Operation_P->Case.IterativeLinearSolver.Type; | Solver = Operation_P->Case.IterativeLinearSolver.Type; | |||
LinearSystemType = Operation_P->Case.IterativeLinearSolver.OpMatMult; | LinearSystemType = Operation_P->Case.IterativeLinearSolver.OpMatMult; | |||
if(strcmp(LinearSystemType, "I-A") && | if(strcmp(LinearSystemType, "I-A") && strcmp(LinearSystemType, "I+A") && | |||
strcmp(LinearSystemType, "I+A") && | strcmp(LinearSystemType, "A")) { | |||
strcmp(LinearSystemType, "A")){ | Message::Error( | |||
Message::Error("Linear system type \"%s\" unknown. Try \"A\", \"I-A\" or \"I | "Linear system type \"%s\" unknown. Try \"A\", \"I-A\" or \"I+A\".", | |||
+A\".", | LinearSystemType); | |||
LinearSystemType); | ||||
} | } | |||
Message::Info(3, "Linear system type: (%s)X = B", LinearSystemType); | Message::Info(3, "Linear system type: (%s)X = B", LinearSystemType); | |||
Message::Info(3, "Number of Processes: %d", mpi_comm_size); | Message::Info(3, "Number of Processes: %d", mpi_comm_size); | |||
Message::Info(3, "Iterative solver: %s", Solver.c_str()); | Message::Info(3, "Iterative solver: %s", Solver.c_str()); | |||
Message::Info(3, "Tolerance: %g", Tol); | Message::Info(3, "Tolerance: %g", Tol); | |||
Message::Info(3, "Max. numb. of iterations: %i", MaxIter); | Message::Info(3, "Max. numb. of iterations: %i", MaxIter); | |||
// if jacobi then MatMult(A,X) = A*X for linear system (I-A)*X=B | // if jacobi then MatMult(A,X) = A*X for linear system (I-A)*X=B | |||
if(Solver == "jacobi"){ | if(Solver == "jacobi") { | |||
if(strcmp(LinearSystemType, "I-A")) | if(strcmp(LinearSystemType, "I-A")) | |||
Message::Error("Jacobi method implemented only for linear system of type \ | Message::Error( | |||
"I-A\""); | "Jacobi method implemented only for linear system of type \"I-A\""); | |||
LinearSystemType = (char *)"A"; | LinearSystemType = (char *)"A"; | |||
} | } | |||
Message::Info(3, "Number of Fields: %d", AllField.nb_field); | Message::Info(3, "Number of Fields: %d", AllField.nb_field); | |||
if(ILSField::areNeighbor) | if(ILSField::areNeighbor) | |||
Message::Info(3, "Neighbors are specified: Fast exchange between process"); | Message::Info(3, "Neighbors are specified: Fast exchange between process"); | |||
for(int iField = 0; iField < AllField.nb_field; iField++) | for(int iField = 0; iField < AllField.nb_field; iField++) | |||
Message::Info(3, "Size of Field %d: %d (on CPU %d)", AllField.GmshTag[iField | Message::Info(3, "Size of Field %d: %d (on CPU %d)", | |||
], | AllField.GmshTag[iField], AllField.size[iField], | |||
AllField.size[iField], AllField.rank[iField]); | AllField.rank[iField]); | |||
Message::Info(3, "Total system size: %d", AllField.n_elem); | Message::Info(3, "Total system size: %d", AllField.n_elem); | |||
#if !defined(PETSC_USE_COMPLEX) | #if !defined(PETSC_USE_COMPLEX) | |||
if(Current.NbrHar == 2){ | if(Current.NbrHar == 2) { | |||
AllField.n_elem *= 2; | AllField.n_elem *= 2; | |||
MyField.n_elem *= 2; | MyField.n_elem *= 2; | |||
Message::Info(3, "PETSc REAL arithmetic: system size is doubled: n=%d", | Message::Info(3, "PETSc REAL arithmetic: system size is doubled: n=%d", | |||
AllField.n_elem); | AllField.n_elem); | |||
} | } | |||
#endif | #endif | |||
// Creating the vector/matrix | // Creating the vector/matrix | |||
// Petsc Vec of unknown | // Petsc Vec of unknown | |||
skipping to change at line 1092 | skipping to change at line 1112 | |||
// context of the shell matrix | // context of the shell matrix | |||
_try(CreateILSMat(&ctx)); | _try(CreateILSMat(&ctx)); | |||
_try(SetILSMat(&ctx, LinearSystemType, &MyField, &AllField, Resolution_P, | _try(SetILSMat(&ctx, LinearSystemType, &MyField, &AllField, Resolution_P, | |||
Operation_P, DofData_P0, GeoData_P0)); | Operation_P, DofData_P0, GeoData_P0)); | |||
// Shell matrix containing the indices of the unknown field (on which the | // Shell matrix containing the indices of the unknown field (on which the | |||
// iterative solver works) | // iterative solver works) | |||
_try(MatCreateShell(ILSComm, MyField.n_elem, MyField.n_elem, AllField.n_elem, | _try(MatCreateShell(ILSComm, MyField.n_elem, MyField.n_elem, AllField.n_elem, | |||
AllField.n_elem, ctx, &A)); | AllField.n_elem, ctx, &A)); | |||
_try(MatShellSetContext(A, ctx)); | _try(MatShellSetContext(A, ctx)); | |||
_try(MatShellSetOperation(A, MATOP_MULT, (void(*)(void))MatMultILSMat)); | _try(MatShellSetOperation(A, MATOP_MULT, (void (*)(void))MatMultILSMat)); | |||
_try(PetscBarrier((PetscObject)PETSC_NULL)); | _try(PetscBarrier((PetscObject)PETSC_NULL)); | |||
// Creation of the iterative solver + solving | // Creation of the iterative solver + solving | |||
if(Solver == "print"){ | if(Solver == "print") { | |||
// Print the iteration matrix | // Print the iteration matrix | |||
Message::Info(3, "Launching Print mode (no resolution):"); | Message::Info(3, "Launching Print mode (no resolution):"); | |||
Message::Info(3, "Building Iteration Matrix..."); | Message::Info(3, "Building Iteration Matrix..."); | |||
Mat IterationMatrix; | Mat IterationMatrix; | |||
_try(BuildIterationMatrix(A, &IterationMatrix)); | _try(BuildIterationMatrix(A, &IterationMatrix)); | |||
Message::Info(3, "Printing Iteration Matrix..."); | Message::Info(3, "Printing Iteration Matrix..."); | |||
_try(PrintMatrix(IterationMatrix, "file_mat_itmat.m", "IterationMatrix")); | _try(PrintMatrix(IterationMatrix, "file_mat_itmat.m", "IterationMatrix")); | |||
Message::Info(3, "Printing Right Hand Side..."); | Message::Info(3, "Printing Right Hand Side..."); | |||
_try(PrintVec(B, "file_vec_rhs.m", "RightHandSide")); | _try(PrintVec(B, "file_vec_rhs.m", "RightHandSide")); | |||
Message::Info(3, "done"); | Message::Info(3, "done"); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(VecDestroy(&X)); | _try(VecDestroy(&X)); | |||
_try(VecDestroy(&B)); | _try(VecDestroy(&B)); | |||
_try(MatDestroy(&A)); | _try(MatDestroy(&A)); | |||
#else | #else | |||
_try(VecDestroy(X)); | _try(VecDestroy(X)); | |||
_try(VecDestroy(B)); | _try(VecDestroy(B)); | |||
_try(MatDestroy(A)); | _try(MatDestroy(A)); | |||
#endif | #endif | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
else if(Solver == "jacobi"){ | else if(Solver == "jacobi") { | |||
_try(Jacobi_Solver(A, X, B, Tol, MaxIter)); | _try(Jacobi_Solver(A, X, B, Tol, MaxIter)); | |||
} | } | |||
else{ | else { | |||
// Krylov subspace solver | // Krylov subspace solver | |||
_try(KSPCreate(ILSComm,&ksp)); | _try(KSPCreate(ILSComm, &ksp)); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 5))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 5))) | |||
_try(KSPSetOperators(ksp, A, A)); | _try(KSPSetOperators(ksp, A, A)); | |||
#else | #else | |||
_try(KSPSetOperators(ksp, A, A, DIFFERENT_NONZERO_PATTERN)); | _try(KSPSetOperators(ksp, A, A, DIFFERENT_NONZERO_PATTERN)); | |||
#endif | #endif | |||
_try(KSPSetTolerances(ksp, Tol, PETSC_DEFAULT, PETSC_DEFAULT, MaxIter)); | _try(KSPSetTolerances(ksp, Tol, PETSC_DEFAULT, PETSC_DEFAULT, MaxIter)); | |||
_try(KSPMonitorSet(ksp, KspMonitor, PETSC_NULL, PETSC_NULL)); | _try(KSPMonitorSet(ksp, KspMonitor, PETSC_NULL, PETSC_NULL)); | |||
//Preconditioning | // Preconditioning | |||
bool pcright = true; | bool pcright = true; | |||
std::string match = "_pcleft"; | std::string match = "_pcleft"; | |||
int pos = (int)Solver.find(match.c_str()); | int pos = (int)Solver.find(match.c_str()); | |||
if(pos != (int)std::string::npos){ | if(pos != (int)std::string::npos) { | |||
Solver.replace(pos, match.size(), ""); | Solver.replace(pos, match.size(), ""); | |||
pcright = false; | pcright = false; | |||
} | } | |||
_try(KSPGetPC(ksp, &pc)); | _try(KSPGetPC(ksp, &pc)); | |||
// check if a preconditioner is specified | // check if a preconditioner is specified | |||
int nb_pc = List_Nbr(Operation_P->Case.IterativeLinearSolver.Operations_Mx); | int nb_pc = List_Nbr(Operation_P->Case.IterativeLinearSolver.Operations_Mx); | |||
if(nb_pc == 0) { | if(nb_pc == 0) { _try(PCSetType(pc, PCNONE)); } | |||
_try(PCSetType(pc, PCNONE)); | else { | |||
} | Message::Info(3, "%s preconditioner detected", | |||
else{ | pcright ? "Right" : "Left"); | |||
Message::Info(3, "%s preconditioner detected", pcright ? "Right" : "Left") | ||||
; | ||||
// context of the shell PC | // context of the shell PC | |||
_try(CreateILSMat(&ctx_pc)); | _try(CreateILSMat(&ctx_pc)); | |||
_try(SetILSMat(&ctx_pc, LinearSystemType, &MyField, &AllField, Resolution_ | _try(SetILSMat(&ctx_pc, LinearSystemType, &MyField, &AllField, | |||
P, | Resolution_P, Operation_P, DofData_P0, GeoData_P0)); | |||
Operation_P, DofData_P0, GeoData_P0)); | ||||
// Shell PC | // Shell PC | |||
_try(PCSetType(pc,PCSHELL)); | _try(PCSetType(pc, PCSHELL)); | |||
_try(PCShellSetContext(pc, ctx_pc)); | _try(PCShellSetContext(pc, ctx_pc)); | |||
_try(PCShellSetApply(pc, MatMultPC)); | _try(PCShellSetApply(pc, MatMultPC)); | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(KSPSetPCSide(ksp, pcright ? PC_RIGHT : PC_LEFT)); | _try(KSPSetPCSide(ksp, pcright ? PC_RIGHT : PC_LEFT)); | |||
#else | #else | |||
_try(KSPSetPreconditionerSide(ksp, pcright ? PC_RIGHT : PC_LEFT)); | _try(KSPSetPreconditionerSide(ksp, pcright ? PC_RIGHT : PC_LEFT)); | |||
#endif | #endif | |||
} | } | |||
_try(KSPSetType(ksp, Solver.c_str())); | _try(KSPSetType(ksp, Solver.c_str())); | |||
if(Restart > 0 && Solver.find("gmres") != std::string::npos){ | if(Restart > 0 && Solver.find("gmres") != std::string::npos) { | |||
_try(KSPGMRESSetRestart(ksp, Restart)); | _try(KSPGMRESSetRestart(ksp, Restart)); | |||
Message::Info(3, "GMRES Restart: %i", Restart); | Message::Info(3, "GMRES Restart: %i", Restart); | |||
} | } | |||
if(Restart > 0 && Solver.find("bcgsl") != std::string::npos){ | if(Restart > 0 && Solver.find("bcgsl") != std::string::npos) { | |||
_try(KSPBCGSLSetEll(ksp, Restart)); | _try(KSPBCGSLSetEll(ksp, Restart)); | |||
Message::Info(3, "BiCGL Ell: %i", Restart); | Message::Info(3, "BiCGL Ell: %i", Restart); | |||
} | } | |||
// set ksp | // set ksp | |||
_try(KSPSetFromOptions(ksp)); | _try(KSPSetFromOptions(ksp)); | |||
// Solve | // Solve | |||
_try(KSPSolve(ksp, B, X)); | _try(KSPSolve(ksp, B, X)); | |||
_try(KSPView(ksp, PETSC_VIEWER_STDOUT_WORLD)); | _try(KSPView(ksp, PETSC_VIEWER_STDOUT_WORLD)); | |||
PetscInt its; | PetscInt its; | |||
_try(KSPGetIterationNumber(ksp, &its)); | _try(KSPGetIterationNumber(ksp, &its)); | |||
Current.KSPIterations = its; | Current.KSPIterations = its; | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(KSPDestroy(&ksp)); | _try(KSPDestroy(&ksp)); | |||
#else | #else | |||
_try(KSPDestroy(ksp)); | _try(KSPDestroy(ksp)); | |||
#endif | #endif | |||
} | } | |||
// computing solution | // computing solution | |||
// we reuse B_std to avoid the creation of a new std::vector ... | // we reuse B_std to avoid the creation of a new std::vector ... | |||
_try(PETSc_Vec_to_STD_Vec(X, &MyField, &B_std)); | _try(PETSc_Vec_to_STD_Vec(X, &MyField, &B_std)); | |||
// update views | // update views | |||
for (int cpt_view = 0 ; cpt_view < MyField.nb_field; cpt_view++){ | for(int cpt_view = 0; cpt_view < MyField.nb_field; cpt_view++) { | |||
PView *view = GetViewByTag(MyField.GmshTag[cpt_view]); | PView *view = GetViewByTag(MyField.GmshTag[cpt_view]); | |||
view->getData()->fromVector(B_std[cpt_view]); | view->getData()->fromVector(B_std[cpt_view]); | |||
} | } | |||
// Transfer PView | // Transfer PView | |||
#ifdef TIMER | #ifdef TIMER | |||
double tbcast_start = MPI_Wtime(); | double tbcast_start = MPI_Wtime(); | |||
#endif | #endif | |||
PViewBCast(MyField, AllField); | PViewBCast(MyField, AllField); | |||
#ifdef TIMER | #ifdef TIMER | |||
double tbcast_end = MPI_Wtime(); | double tbcast_end = MPI_Wtime(); | |||
double t_bcast = tbcast_end - tbcast_start; | double t_bcast = tbcast_end - tbcast_start; | |||
Message::Info(3, "Process %d: tbcast = %g", mpi_comm_rank, t_bcast); | Message::Info(3, "Process %d: tbcast = %g", mpi_comm_rank, t_bcast); | |||
#endif | #endif | |||
// cleaning | // cleaning | |||
#if (PETSC_VERSION_RELEASE == 0 || ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | #if(PETSC_VERSION_RELEASE == 0 || \ | |||
 | ((PETSC_VERSION_MAJOR == 3) && (PETSC_VERSION_MINOR >= 2))) | |||
_try(VecDestroy(&X)); | _try(VecDestroy(&X)); | |||
_try(VecDestroy(&B)); | _try(VecDestroy(&B)); | |||
_try(MatDestroy(&A)); | _try(MatDestroy(&A)); | |||
#else | #else | |||
_try(VecDestroy(X)); | _try(VecDestroy(X)); | |||
_try(VecDestroy(B)); | _try(VecDestroy(B)); | |||
_try(MatDestroy(A)); | _try(MatDestroy(A)); | |||
#endif | #endif | |||
#ifdef TIMER | #ifdef TIMER | |||
time_total = MPI_Wtime() - time_start; | time_total = MPI_Wtime() - time_start; | |||
#endif | #endif | |||
if(MyField.TimeBcast.size()){ | if(MyField.TimeBcast.size()) { | |||
// CPU Times | // CPU Times | |||
double aver_it = 0, aver_com = 0; | double aver_it = 0, aver_com = 0; | |||
char filename[50]; | char filename[50]; | |||
FILE *fid; | FILE *fid; | |||
sprintf(filename, "log_cpu_%d", mpi_comm_rank); | sprintf(filename, "log_cpu_%d", mpi_comm_rank); | |||
fid = FOpen(filename, "w"); | fid = FOpen(filename, "w"); | |||
fprintf(fid, "Process rank %d\n", mpi_comm_rank); | fprintf(fid, "Process rank %d\n", mpi_comm_rank); | |||
fprintf(fid, "it. CPU Total \t ... Treatment \t ... Communication\n"); | fprintf(fid, "it. CPU Total \t ... Treatment \t ... Communication\n"); | |||
for (unsigned int i = 0; i < MyField.TimeBcast.size() ; i ++){ | for(unsigned int i = 0; i < MyField.TimeBcast.size(); i++) { | |||
fprintf(fid, "%d \t%g\t %g\t %g\t (%g%%)\n", i+1, MyField.TimeIt[i], | fprintf(fid, "%d \t%g\t %g\t %g\t (%g%%)\n", i + 1, MyField.TimeIt[i], | |||
MyField.TimeTreatment[i], MyField.TimeBcast[i], | MyField.TimeTreatment[i], MyField.TimeBcast[i], | |||
MyField.TimeBcast[i]/MyField.TimeIt[i]*100); | MyField.TimeBcast[i] / MyField.TimeIt[i] * 100); | |||
aver_com += MyField.TimeBcast[i]/MyField.TimeBcast.size(); | aver_com += MyField.TimeBcast[i] / MyField.TimeBcast.size(); | |||
aver_it += MyField.TimeIt[i]/MyField.TimeIt.size(); | aver_it += MyField.TimeIt[i] / MyField.TimeIt.size(); | |||
} | } | |||
fprintf(fid, "Average: %g %g\n", aver_it, aver_com); | fprintf(fid, "Average: %g %g\n", aver_it, aver_com); | |||
fprintf(fid, "Percent of communication in average: %g%%\n", aver_com/aver_it | fprintf(fid, "Percent of communication in average: %g%%\n", | |||
*100); | aver_com / aver_it * 100); | |||
fclose(fid); | fclose(fid); | |||
#ifdef TIMER | #ifdef TIMER | |||
Message::Info(3, "Processus %d: ended in %g", mpi_comm_rank, time_total); | Message::Info(3, "Processus %d: ended in %g", mpi_comm_rank, time_total); | |||
Message::Info(3, "Processus %d: Average iteration time %g with %g for commun | Message::Info(3, | |||
ication (%g%%)", | "Processus %d: Average iteration time %g with %g for " | |||
mpi_comm_rank, aver_it, aver_com, aver_com/aver_it*100); | "communication (%g%%)", | |||
mpi_comm_rank, aver_it, aver_com, aver_com / aver_it * 100); | ||||
#endif | #endif | |||
} | } | |||
// reset pointers to static fields | // reset pointers to static fields | |||
MyStaticField = AllStaticField = 0; | MyStaticField = AllStaticField = 0; | |||
_try(PetscBarrier((PetscObject)PETSC_NULL)); | _try(PetscBarrier((PetscObject)PETSC_NULL)); | |||
PetscFunctionReturn(0); | PetscFunctionReturn(0); | |||
} | } | |||
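Aside: stripped of the GetDP-specific plumbing (field conversion, PView broadcast, timing logs), the Krylov branch of Operation_IterativeLinearSolver reduces to the standard PETSc shell-matrix recipe: wrap the matrix-free product in a MATSHELL, hand it to a KSP with the requested tolerance, iteration limit and restart, and solve. The condensed sketch below assumes PETSc 3.5 or newer (two-argument KSPSetOperators); SolveWithShell, UserCtx and UserMult are illustrative names, not part of this file.

// Illustrative sketch (assumes <petscksp.h>, PETSc >= 3.5).
typedef struct { int placeholder; } UserCtx;
static PetscErrorCode UserMult(Mat A, Vec x, Vec y); // matrix-free product, defined elsewhere

static PetscErrorCode SolveWithShell(UserCtx *ctx, Vec B, Vec X,
                                     PetscInt n_loc, PetscInt n_glob,
                                     double tol, int maxit, int restart)
{
  Mat A;
  KSP ksp;
  PC pc;
  PetscErrorCode ierr;
  ierr = MatCreateShell(PETSC_COMM_WORLD, n_loc, n_loc, n_glob, n_glob, ctx, &A); CHKERRQ(ierr);
  ierr = MatShellSetOperation(A, MATOP_MULT, (void (*)(void))UserMult); CHKERRQ(ierr);
  ierr = KSPCreate(PETSC_COMM_WORLD, &ksp); CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp, A, A); CHKERRQ(ierr);
  ierr = KSPSetTolerances(ksp, tol, PETSC_DEFAULT, PETSC_DEFAULT, maxit); CHKERRQ(ierr);
  ierr = KSPGetPC(ksp, &pc); CHKERRQ(ierr);
  ierr = PCSetType(pc, PCNONE); CHKERRQ(ierr);  // no preconditioner by default
  ierr = KSPSetType(ksp, KSPGMRES); CHKERRQ(ierr);
  if(restart > 0) { ierr = KSPGMRESSetRestart(ksp, restart); CHKERRQ(ierr); }
  ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr);
  ierr = KSPSolve(ksp, B, X); CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp); CHKERRQ(ierr);
  ierr = MatDestroy(&A); CHKERRQ(ierr);
  return 0;
}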
int Operation_BroadcastFields(struct Resolution *Resolution_P, | int Operation_BroadcastFields(struct Resolution *Resolution_P, | |||
struct Operation *Operation_P, | struct Operation *Operation_P, | |||
struct DofData *DofData_P0, | struct DofData *DofData_P0, | |||
struct GeoData *GeoData_P0) | struct GeoData *GeoData_P0) | |||
{ | { | |||
if(!MyStaticField || !AllStaticField){ | if(!MyStaticField || !AllStaticField) { | |||
// we are not in Operation_IterativeLinearSolver: call the new, generic | // we are not in Operation_IterativeLinearSolver: call the new, generic | |||
// BroadcastFields operation: | // BroadcastFields operation: | |||
return Operation_BroadcastFieldsGeneric(Resolution_P, Operation_P, | return Operation_BroadcastFieldsGeneric(Resolution_P, Operation_P, | |||
DofData_P0, GeoData_P0); | DofData_P0, GeoData_P0); | |||
} | } | |||
// in the IterativeLinearSolver-specific BroadCastFields operation, the list | // in the IterativeLinearSolver-specific BroadCastFields operation, the list | |||
// of view tags contains the list of views to *not* broadcast | // of view tags contains the list of views to *not* broadcast | |||
std::set<int> fieldsToSkip; | std::set<int> fieldsToSkip; | |||
for(int i = 0; i < List_Nbr(Operation_P->Case.BroadcastFields.ViewTags); i++){ | for(int i = 0; i < List_Nbr(Operation_P->Case.BroadcastFields.ViewTags); | |||
 | i++) { | |||
double j; | double j; | |||
List_Read(Operation_P->Case.BroadcastFields.ViewTags, i, &j); | List_Read(Operation_P->Case.BroadcastFields.ViewTags, i, &j); | |||
fieldsToSkip.insert((int) j); | fieldsToSkip.insert((int)j); | |||
} | } | |||
PViewBCast(*MyStaticField, *AllStaticField, fieldsToSkip); | PViewBCast(*MyStaticField, *AllStaticField, fieldsToSkip); | |||
return 0; | return 0; | |||
} | } | |||
#else | #else | |||
int Operation_IterativeLinearSolver(struct Resolution *Resolution_P, | int Operation_IterativeLinearSolver(struct Resolution *Resolution_P, | |||
struct Operation *Operation_P, | struct Operation *Operation_P, | |||
struct DofData *DofData_P0, | struct DofData *DofData_P0, | |||
struct GeoData *GeoData_P0) | struct GeoData *GeoData_P0) | |||
{ | { | |||
Message::Error("IterativeLinearSolver requires PETSc and Gmsh"); | Message::Error("IterativeLinearSolver requires PETSc and Gmsh"); | |||
return 0; | return 0; | |||
} | } | |||
int Operation_BroadcastFields(struct Resolution *Resolution_P, | int Operation_BroadcastFields(struct Resolution *Resolution_P, | |||
struct Operation *Operation_P, | struct Operation *Operation_P, | |||
struct DofData *DofData_P0, | struct DofData *DofData_P0, | |||
struct GeoData *GeoData_P0) | struct GeoData *GeoData_P0) | |||
{ | { | |||
return Operation_BroadcastFieldsGeneric(Resolution_P, Operation_P, | return Operation_BroadcastFieldsGeneric(Resolution_P, Operation_P, DofData_P0, | |||
DofData_P0, GeoData_P0); | GeoData_P0); | |||
} | } | |||
#endif | #endif | |||
End of changes. 176 change blocks. 317 lines changed or deleted, 299 lines changed or added.