"Fossies" - the Fresh Open Source Software Archive

Member "dune-istl-2.7.1/dune/istl/novlpschwarz.hh" (26 Nov 2020, 13526 Bytes) of package /linux/misc/dune/dune-istl-2.7.1.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "novlpschwarz.hh" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 2.6.0_vs_2.7.0.

    1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
    2 // vi: set et ts=4 sw=2 sts=2:
    3 #ifndef DUNE_ISTL_NOVLPSCHWARZ_HH
    4 #define DUNE_ISTL_NOVLPSCHWARZ_HH
    5 
#include <iostream>              // for input/output to shell
#include <fstream>               // for input/output to files
#include <vector>                // STL vector class
#include <sstream>
#include <map>                   // std::map, std::multimap
#include <memory>                // std::shared_ptr
#include <utility>               // std::pair, std::make_pair

#include <cmath>                // Yes, we do some math here

#include <dune/common/timer.hh>

#include <dune/common/hybridutilities.hh>

#include "io.hh"
#include "bvector.hh"
#include "vbvector.hh"
#include "bcrsmatrix.hh"
#include "io.hh"
#include "gsetc.hh"
#include "ilu.hh"
#include "operators.hh"
#include "solvers.hh"
#include "preconditioners.hh"
#include "scalarproducts.hh"
#include "owneroverlapcopy.hh"
   29 
   30 namespace Dune {
   31 
   32   /**
   33    * @defgroup ISTL_Parallel Parallel Solvers
   34    * @ingroup ISTL_Solvers
   35    * Instead of using parallel data structures (matrices and vectors) that
   36    * (implicitly) know the data distribution and communication patterns,
   37    * there is a clear separation of the parallel data composition together
   38    *  with the communication APIs from the data structures. This allows for
   39    * implementing overlapping and nonoverlapping domain decompositions as
   40    * well as data parallel parallelisation approaches.
   41    *
   42    * The \ref ISTL_Solvers "solvers" can easily be turned into parallel solvers
   43    * initializing them with matching parallel subclasses of the base classes
   44    * ScalarProduct, Preconditioner and LinearOperator.
   45    *
   46    * The information of the data distribution is provided by OwnerOverlapCopyCommunication
   47    * of \ref ISTL_Comm "communication API".
   48    *
   49    * Currently only data parallel versions are shipped with dune-istl. Domain
   50    * decomposition can be found in module dune-dd.
   51    */
   52   /**
   53      @addtogroup ISTL_Operators
   54      @{
   55    */
   56 
   57   /**
   58    * @brief A nonoverlapping operator with communication object.
   59    */
   60   template<class M, class X, class Y, class C>
   61   class NonoverlappingSchwarzOperator : public AssembledLinearOperator<M,X,Y>
   62   {
   63   public:
   64     //! \brief The type of the matrix we operate on.
   65     typedef M matrix_type;
   66     //! \brief The type of the domain.
   67     typedef X domain_type;
   68     //! \brief The type of the range.
   69     typedef Y range_type;
   70     //! \brief The field type of the range
   71     typedef typename X::field_type field_type;
   72     //! \brief The type of the communication object
   73     typedef C communication_type;
   74 
   75     typedef typename C::PIS PIS;
   76     typedef typename C::RI RI;
   77     typedef typename RI::RemoteIndexList RIL;
   78     typedef typename RI::const_iterator RIIterator;
   79     typedef typename RIL::const_iterator RILIterator;
   80     typedef typename M::ConstColIterator ColIterator;
   81     typedef typename M::ConstRowIterator RowIterator;
   82     typedef std::multimap<int,int> MM;
   83     typedef std::multimap<int,std::pair<int,RILIterator> > RIMap;
   84     typedef typename RIMap::iterator RIMapit;
   85 
   86     /**
   87      * @brief constructor: just store a reference to a matrix.
   88      *
   89      * @param A The assembled matrix.
   90      * @param com The communication object for syncing owner and copy
   91      * data points. (E.~g. OwnerOverlapCommunication )
   92      */
   93     NonoverlappingSchwarzOperator (const matrix_type& A, const communication_type& com)
   94       : _A_(stackobject_to_shared_ptr(A)), communication(com), buildcomm(true)
   95     {}
   96 
   97     NonoverlappingSchwarzOperator (std::shared_ptr<const matrix_type> A, const communication_type& com)
   98       : _A_(A), communication(com), buildcomm(true)
   99     {}
  100 
  101     //! apply operator to x:  \f$ y = A(x) \f$
  102     virtual void apply (const X& x, Y& y) const
  103     {
  104       y = 0;
  105       novlp_op_apply(x,y,1);
  106       communication.addOwnerCopyToOwnerCopy(y,y);
  107     }
  108 
  109     //! apply operator to x, scale and add:  \f$ y = y + \alpha A(x) \f$
  110     virtual void applyscaleadd (field_type alpha, const X& x, Y& y) const
  111     {
  112       // only apply communication to alpha*A*x to make it consistent,
  113       // y already has to be consistent.
  114       Y y1(y);
  115       y = 0;
  116       novlp_op_apply(x,y,alpha);
  117       communication.addOwnerCopyToOwnerCopy(y,y);
  118       y += y1;
  119     }
  120 
  121     //! get matrix via *
  122     virtual const matrix_type& getmat () const
  123     {
  124       return *_A_;
  125     }
  126 
  127     void novlp_op_apply (const X& x, Y& y, field_type alpha) const
  128     {
  129       //get index sets
  130       const PIS& pis=communication.indexSet();
  131       const RI& ri = communication.remoteIndices();
  132 
  133       // at the beginning make a multimap "bordercontribution".
  134       // process has i and j as border dofs but is not the owner
  135       // => only contribute to Ax if i,j is in bordercontribution
  136       if (buildcomm == true) {
  137 
  138         // set up mask vector
  139         if (mask.size()!=static_cast<typename std::vector<double>::size_type>(x.size())) {
  140           mask.resize(x.size());
  141           for (typename std::vector<double>::size_type i=0; i<mask.size(); i++)
  142             mask[i] = 1;
  143           for (typename PIS::const_iterator i=pis.begin(); i!=pis.end(); ++i)
  144             if (i->local().attribute()==OwnerOverlapCopyAttributeSet::copy)
  145               mask[i->local().local()] = 0;
  146             else if (i->local().attribute()==OwnerOverlapCopyAttributeSet::overlap)
  147               mask[i->local().local()] = 2;
  148         }
  149 
  150         for (MM::iterator iter = bordercontribution.begin();
  151              iter != bordercontribution.end(); ++iter)
  152           bordercontribution.erase(iter);
  153         std::map<int,int> owner; //key: local index i, value: process, that owns i
  154         RIMap rimap;
  155 
  156         // for each local index make multimap rimap:
  157         // key: local index i, data: pair of process that knows i and pointer to RI entry
  158         for (RowIterator i = _A_->begin(); i != _A_->end(); ++i)
  159           if (mask[i.index()] == 0)
  160             for (RIIterator remote = ri.begin(); remote != ri.end(); ++remote) {
  161               RIL& ril = *(remote->second.first);
  162               for (RILIterator rindex = ril.begin(); rindex != ril.end(); ++rindex)
  163                 if (rindex->attribute() != OwnerOverlapCopyAttributeSet::overlap)
  164                   if (rindex->localIndexPair().local().local() == i.index()) {
  165                     rimap.insert
  166                       (std::make_pair(i.index(),
  167                                       std::pair<int,RILIterator>(remote->first, rindex)));
  168                     if(rindex->attribute()==OwnerOverlapCopyAttributeSet::owner)
  169                       owner.insert(std::make_pair(i.index(),remote->first));
  170                   }
  171             }
  172 
  173         int iowner = 0;
  174         for (RowIterator i = _A_->begin(); i != _A_->end(); ++i) {
  175           if (mask[i.index()] == 0) {
  176             std::map<int,int>::iterator it = owner.find(i.index());
  177             iowner = it->second;
  178             std::pair<RIMapit, RIMapit> foundiit = rimap.equal_range(i.index());
  179             for (ColIterator j = (*_A_)[i.index()].begin(); j != (*_A_)[i.index()].end(); ++j) {
  180               if (mask[j.index()] == 0) {
  181                 bool flag = true;
  182                 for (RIMapit foundi = foundiit.first; foundi != foundiit.second; ++foundi) {
  183                   std::pair<RIMapit, RIMapit> foundjit = rimap.equal_range(j.index());
  184                   for (RIMapit foundj = foundjit.first; foundj != foundjit.second; ++foundj)
  185                     if (foundj->second.first == foundi->second.first)
  186                       if (foundj->second.second->attribute() == OwnerOverlapCopyAttributeSet::owner
  187                           || foundj->second.first == iowner
  188                           || foundj->second.first  < communication.communicator().rank()) {
  189                         flag = false;
  190                         continue;
  191                       }
  192                   if (flag == false)
  193                     continue;
  194                 }
  195                 // donĀ“t contribute to Ax if
  196                 // 1. the owner of j has i as interior/border dof
  197                 // 2. iowner has j as interior/border dof
  198                 // 3. there is another process with smaller rank that has i and j
  199                 // as interor/border dofs
  200                 // if the owner of j does not have i as interior/border dof,
  201                 // it will not be taken into account
  202                 if (flag==true)
  203                   bordercontribution.insert(std::pair<int,int>(i.index(),j.index()));
  204               }
  205             }
  206           }
  207         }
  208         buildcomm = false;
  209       }
  210 
  211       //compute alpha*A*x nonoverlapping case
  212       for (RowIterator i = _A_->begin(); i != _A_->end(); ++i) {
  213         if (mask[i.index()] == 0) {
  214           //dof doesn't belong to process but is border (not ghost)
  215           for (ColIterator j = (*_A_)[i.index()].begin(); j != (*_A_)[i.index()].end(); ++j) {
  216             if (mask[j.index()] == 1) //j is owner => then sum entries
  217               Impl::asMatrix(*j).usmv(alpha,x[j.index()],y[i.index()]);
  218             else if (mask[j.index()] == 0) {
  219               std::pair<MM::iterator, MM::iterator> itp =
  220                 bordercontribution.equal_range(i.index());
  221               for (MM::iterator it = itp.first; it != itp.second; ++it)
  222                 if ((*it).second == (int)j.index())
  223                   Impl::asMatrix(*j).usmv(alpha,x[j.index()],y[i.index()]);
  224             }
  225           }
  226         }
  227         else if (mask[i.index()] == 1) {
  228           for (ColIterator j = (*_A_)[i.index()].begin(); j != (*_A_)[i.index()].end(); ++j)
  229             if (mask[j.index()] != 2)
  230               Impl::asMatrix(*j).usmv(alpha,x[j.index()],y[i.index()]);
  231         }
  232       }
  233     }
  234 
  235     //! Category of the linear operator (see SolverCategory::Category)
  236     virtual SolverCategory::Category category() const
  237     {
  238       return SolverCategory::nonoverlapping;
  239     }
  240 
  241   private:
  242     std::shared_ptr<const matrix_type> _A_;
  243     const communication_type& communication;
  244     mutable bool buildcomm;
  245     mutable std::vector<double> mask;
  246     mutable std::multimap<int,int>  bordercontribution;
  247   };
  248 
  249   /** @} */
  250 
  namespace Amg
  {
    // Forward declaration only: NonoverlappingBlockPreconditioner below
    // befriends its ConstructionTraits specialization (defined elsewhere)
    // so the AMG construction machinery can access private members.
    template<class T> class ConstructionTraits;
  }
  255 
  256   /**
  257    * @addtogroup ISTL_Prec
  258    * @{
  259    */
  260 
  /**
   * @brief Nonoverlapping parallel preconditioner.
   *
   * This is essentially a wrapper that takes a sequential
   * preconditioner. In each step the sequential preconditioner
   * is applied and then all owner data points are updated on
   * all other processes.
   */

  template<class C, class P>
  class NonoverlappingBlockPreconditioner
    : public Preconditioner<typename P::domain_type,typename P::range_type> {
    // grant the AMG construction machinery access to the private members
    friend class Amg::ConstructionTraits<NonoverlappingBlockPreconditioner<C,P> >;
    using X = typename P::domain_type;
    using Y = typename P::range_type;
  public:
    //! \brief The domain type of the preconditioner.
    typedef typename P::domain_type domain_type;
    //! \brief The range type of the preconditioner.
    typedef typename P::range_type range_type;
    //! \brief The type of the communication object.
    typedef C communication_type;

    /*! \brief Constructor.

       constructor gets all parameters to operate the prec.
       \param p The sequential preconditioner.
       \param c The communication object for syncing owner and copy
       data points. (E.g. OwnerOverlapCopyCommunication )
     */
    NonoverlappingBlockPreconditioner (P& p, const communication_type& c)
      : _preconditioner(stackobject_to_shared_ptr(p)), _communication(c)
    {   }

    /*! \brief Constructor.

       constructor gets all parameters to operate the prec.
       \param p The sequential preconditioner (ownership is shared).
       \param c The communication object for syncing owner and copy
       data points. (E.g. OwnerOverlapCopyCommunication )
     */
    NonoverlappingBlockPreconditioner (const std::shared_ptr<P>& p, const communication_type& c)
      : _preconditioner(p), _communication(c)
    {   }

    /*!
       \brief Prepare the preconditioner.

       \copydoc Preconditioner::pre(domain_type&,range_type&)
     */
    virtual void pre (domain_type& x, range_type& b)
    {
      _preconditioner->pre(x,b);
    }

    /*!
       \brief Apply the preconditioner

       \copydoc Preconditioner::apply(domain_type&,const range_type&)
     */
    virtual void apply (domain_type& v, const range_type& d)
    {
      // block preconditioner equivalent to WrappedPreconditioner from
      // pdelab/backend/ovlpistsolverbackend.hh,
      // but not to BlockPreconditioner from schwarz.hh
      _preconditioner->apply(v,d);
      // make the owner values consistent on all copies afterwards
      _communication.addOwnerCopyToOwnerCopy(v,v);
    }

    //! \brief Apply the sequential preconditioner with the given sweep
    //! direction, then make the result consistent across processes.
    template<bool forward>
    void apply (X& v, const Y& d)
    {
      _preconditioner->template apply<forward>(v,d);
      _communication.addOwnerCopyToOwnerCopy(v,v);
    }

    /*!
       \brief Clean up.

       \copydoc Preconditioner::post(domain_type&)
     */
    virtual void post (domain_type& x)
    {
      _preconditioner->post(x);
    }

    //! Category of the preconditioner (see SolverCategory::Category)
    virtual SolverCategory::Category category() const
    {
      return SolverCategory::nonoverlapping;
    }

  private:
    //! \brief a sequential preconditioner
    std::shared_ptr<P> _preconditioner;

    //! \brief the communication object (must outlive this preconditioner)
    const communication_type& _communication;
  };
  367 
  368   /** @} end documentation */
  369 
  370 } // end namespace
  371 
  372 #endif