ParmetisPartitioner

Section: C Library Functions (3) Updated: Thu Apr 7 2011
 

NAME

ParmetisPartitioner - partitions the elements of a MeshBase using the ParMETIS parallel graph partitioner

SYNOPSIS


#include <parmetis_partitioner.h>

Inherits Partitioner.  

Public Member Functions


ParmetisPartitioner ()

virtual AutoPtr< Partitioner > clone () const

void partition (MeshBase &mesh, const unsigned int n=libMesh::n_processors())

void repartition (MeshBase &mesh, const unsigned int n=libMesh::n_processors())
 

Static Public Member Functions


static void partition_unpartitioned_elements (MeshBase &mesh, const unsigned int n=libMesh::n_processors())

static void set_parent_processor_ids (MeshBase &mesh)

static void set_node_processor_ids (MeshBase &mesh)
 

Protected Member Functions


virtual void _do_repartition (MeshBase &mesh, const unsigned int n)

virtual void _do_partition (MeshBase &mesh, const unsigned int n)

void single_partition (MeshBase &mesh)
 

Static Protected Attributes


static const unsigned int communication_blocksize = 1000000
 

Private Member Functions


void initialize (const MeshBase &mesh, const unsigned int n_sbdmns)

void build_graph (const MeshBase &mesh)

void assign_partitioning (MeshBase &mesh)
 

Private Attributes


std::vector< unsigned int > _n_active_elem_on_proc

std::map< unsigned int, unsigned int > _global_index_by_pid_map

std::vector< int > _vtxdist

std::vector< int > _xadj

std::vector< int > _adjncy

std::vector< int > _part

std::vector< float > _tpwgts

std::vector< float > _ubvec

std::vector< int > _options

std::vector< int > _vwgt

int _wgtflag

int _ncon

int _numflag

int _nparts

int _edgecut
 

Detailed Description

The ParmetisPartitioner uses the Parmetis graph partitioner to partition the elements.

Definition at line 42 of file parmetis_partitioner.h.  
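
As a quick orientation, here is a hedged usage sketch (not taken from the libMesh sources): it assumes a fully constructed Mesh and an initialized libMesh session, and uses only the partition() interface documented below.

#include <mesh.h>
#include <parmetis_partitioner.h>

// Minimal sketch, assuming libMesh was initialized elsewhere (LibMeshInit)
// and `mesh` has already been built or read from file.
void partition_mesh (Mesh &mesh)
{
  ParmetisPartitioner partitioner;

  // One subdomain per processor -- the documented default for n.
  partitioner.partition (mesh, libMesh::n_processors());
}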

Constructor & Destructor Documentation

 

ParmetisPartitioner::ParmetisPartitioner () [inline] Constructor.

Definition at line 49 of file parmetis_partitioner.h.

Referenced by clone().

{}
 

Member Function Documentation

 

void ParmetisPartitioner::_do_partition (MeshBase &mesh, const unsigned int n_sbdmns) [protected, virtual] Partition the MeshBase into n_sbdmns subdomains.

Implements Partitioner.

Definition at line 54 of file parmetis_partitioner.C.

References _do_repartition().

{
  this->_do_repartition (mesh, n_sbdmns);

//   libmesh_assert (n_sbdmns > 0);

//   // Check for an easy return
//   if (n_sbdmns == 1)
//     {
//       this->single_partition (mesh);
//       return;
//     }

//   // This function must be run on all processors at once
//   parallel_only();

// // What to do if the Parmetis library IS NOT present
// #ifndef LIBMESH_HAVE_PARMETIS

//   libmesh_here();
//   std::cerr << "ERROR: The library has been built without"  << std::endl
//          << "Parmetis support.  Using a Metis"           << std::endl
//          << "partitioner instead!"                       << std::endl;

//   MetisPartitioner mp;

//   mp.partition (mesh, n_sbdmns);
  
// // What to do if the Parmetis library IS present
// #else

//   START_LOG("partition()", "ParmetisPartitioner");

//   // Initialize the data structures required by ParMETIS
//   this->initialize (mesh, n_sbdmns);

//   // build the graph corresponding to the mesh
//   this->build_graph (mesh);
  

//   // Partition the graph
//   MPI_Comm mpi_comm = libMesh::COMM_WORLD;
  
//   // Call the ParMETIS k-way partitioning algorithm.
//   Parmetis::ParMETIS_V3_PartKway(&_vtxdist[0], &_xadj[0], &_adjncy[0], &_vwgt[0], NULL,
//                               &_wgtflag, &_numflag, &_ncon, &_nparts, &_tpwgts[0],
//                               &_ubvec[0], &_options[0], &_edgecut,
//                               &_part[0],
//                               &mpi_comm);

//   // Assign the returned processor ids
//   this->assign_partitioning (mesh);
  

//   STOP_LOG ("partition()", "ParmetisPartitioner");
  
// #endif // #ifndef LIBMESH_HAVE_PARMETIS ... else ...
  
}
 

void ParmetisPartitioner::_do_repartition (MeshBase &mesh, const unsigned int n_sbdmns) [protected, virtual] Parmetis can handle dynamically repartitioning a mesh such that the redistribution costs are minimized. This method takes a previously partitioned domain (which may have then been adaptively refined) and repartitions it.

Reimplemented from Partitioner.

Definition at line 117 of file parmetis_partitioner.C.

References _adjncy, _edgecut, _n_active_elem_on_proc, _ncon, _nparts, _numflag, _options, _part, _tpwgts, _ubvec, _vtxdist, _vwgt, _wgtflag, _xadj, assign_partitioning(), build_graph(), libMesh::COMM_WORLD, initialize(), MeshBase::is_serial(), libMesh::n_processors(), Partitioner::partition(), and Partitioner::single_partition().

Referenced by _do_partition().

{
  libmesh_assert (n_sbdmns > 0);

  // Check for an easy return
  if (n_sbdmns == 1)
    {
      this->single_partition(mesh);
      return;
    }

  // This function must be run on all processors at once
  parallel_only();

// What to do if the Parmetis library IS NOT present
#ifndef LIBMESH_HAVE_PARMETIS

  libmesh_here();
  std::cerr << "ERROR: The library has been built without"  << std::endl
            << "Parmetis support.  Using a Metis"           << std::endl
            << "partitioner instead!"                       << std::endl;

  MetisPartitioner mp;

  mp.partition (mesh, n_sbdmns);
  
// What to do if the Parmetis library IS present
#else

  // Revert to METIS on one processor.
  if (libMesh::n_processors() == 1)
    {
      MetisPartitioner mp;
      mp.partition (mesh, n_sbdmns);
      return;   
    }
  
  START_LOG("repartition()", "ParmetisPartitioner");

  // Initialize the data structures required by ParMETIS
  this->initialize (mesh, n_sbdmns);

  // Make sure all processors have some active local elements.
  {
    bool all_have_active_elements = true;
    for (unsigned int pid=0; pid<_n_active_elem_on_proc.size(); pid++)
      if (!_n_active_elem_on_proc[pid]) all_have_active_elements = false;

    // Parmetis will not work unless each processor has some
    // elements. Specifically, it will abort when passed a NULL
    // partition array on *any* of the processors.
    if (!all_have_active_elements)
      {
        if (!mesh.is_serial())
          libmesh_error();

        // Cowardly revert to METIS, although this requires a serial mesh
        STOP_LOG ("repartition()", "ParmetisPartitioner");
  
        MetisPartitioner mp;
        mp.partition (mesh, n_sbdmns);
        return; 
      }
  }
  
  // build the graph corresponding to the mesh
  this->build_graph (mesh);
  

  // Partition the graph
  std::vector<int> vsize(_vwgt.size(), 1);
  float itr = 1000000.0;
  MPI_Comm mpi_comm = libMesh::COMM_WORLD;

  // Call the ParMETIS adaptive repartitioning method.  This respects the
  // original partitioning when computing the new partitioning so as to
  // minimize the required data redistribution.
  Parmetis::ParMETIS_V3_AdaptiveRepart(_vtxdist.empty() ? NULL : &_vtxdist[0],
                                       _xadj.empty()    ? NULL : &_xadj[0],
                                       _adjncy.empty()  ? NULL : &_adjncy[0],
                                       _vwgt.empty()    ? NULL : &_vwgt[0],
                                       vsize.empty()    ? NULL : &vsize[0],
                                       NULL,
                                       &_wgtflag,
                                       &_numflag,
                                       &_ncon,
                                       &_nparts,
                                       _tpwgts.empty()  ? NULL : &_tpwgts[0],
                                       _ubvec.empty()   ? NULL : &_ubvec[0],
                                       &itr,
                                       &_options[0],
                                       &_edgecut,
                                       _part.empty()    ? NULL : &_part[0],
                                       &mpi_comm);

  // Assign the returned processor ids
  this->assign_partitioning (mesh);
  

  STOP_LOG ("repartition()", "ParmetisPartitioner");
  
#endif // #ifndef LIBMESH_HAVE_PARMETIS ... else ...
  
}
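
For context, a hedged usage sketch of where repartition() fits relative to an initial partition() call; the MeshRefinement object and its element flagging are assumed to be configured elsewhere.

#include <mesh.h>
#include <mesh_refinement.h>
#include <parmetis_partitioner.h>

// Sketch: after adaptive refinement, repartition() lets ParMETIS minimize
// data movement instead of recomputing a partitioning from scratch.
void refine_and_rebalance (Mesh &mesh, MeshRefinement &refinement)
{
  ParmetisPartitioner partitioner;
  partitioner.partition (mesh);              // initial partitioning
  refinement.refine_and_coarsen_elements (); // AMR step; flags set elsewhere
  partitioner.repartition (mesh);            // adaptive repartitioning
}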
 

void ParmetisPartitioner::assign_partitioning (MeshBase &mesh) [private] Assign the computed partitioning to the mesh.

Definition at line 561 of file parmetis_partitioner.C.

References _global_index_by_pid_map, _nparts, _part, _vtxdist, MeshBase::active_elements_begin(), MeshBase::active_elements_end(), DofObject::id(), MeshBase::n_active_local_elem(), libMesh::n_processors(), DofObject::processor_id(), and libMesh::processor_id().

Referenced by _do_repartition().

{
  const unsigned int 
    first_local_elem = _vtxdist[libMesh::processor_id()];
    
  std::vector<std::vector<unsigned int> > 
    requested_ids(libMesh::n_processors()),
    requests_to_fill(libMesh::n_processors());
  
  MeshBase::element_iterator elem_it  = mesh.active_elements_begin();
  MeshBase::element_iterator elem_end = mesh.active_elements_end();
    
  for (; elem_it != elem_end; ++elem_it)
    {   
      Elem *elem = *elem_it;

      // we need to get the index from the owning processor
      // (note we cannot assign it now -- we are iterating
      // over elements again and this will be bad!)
      libmesh_assert (elem->processor_id() < requested_ids.size());
      requested_ids[elem->processor_id()].push_back(elem->id());
    }
  
  // Trade with all processors (including self) to get their indices
  for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
    {
      // Trade my requests with processor procup and procdown
      const unsigned int procup = (libMesh::processor_id() + pid) %
                                   libMesh::n_processors();
      const unsigned int procdown = (libMesh::n_processors() +
                                     libMesh::processor_id() - pid) %
                                     libMesh::n_processors();

      Parallel::send_receive (procup,   requested_ids[procup],
                              procdown, requests_to_fill[procdown]); 

      // we can overwrite these requested ids in-place.
      for (unsigned int i=0; i<requests_to_fill[procdown].size(); i++)
        {
          const unsigned int requested_elem_index = 
            requests_to_fill[procdown][i];

          libmesh_assert(_global_index_by_pid_map.count(requested_elem_index));

          const unsigned int global_index_by_pid = 
            _global_index_by_pid_map[requested_elem_index];

          const unsigned int local_index =
            global_index_by_pid - first_local_elem;
            
          libmesh_assert (local_index < _part.size());
          libmesh_assert (local_index < mesh.n_active_local_elem());
          
          const unsigned int elem_procid =
            static_cast<unsigned int>(_part[local_index]);

          libmesh_assert (elem_procid < static_cast<unsigned int>(_nparts));

          requests_to_fill[procdown][i] = elem_procid;
        }

      // Trade back
      Parallel::send_receive (procdown, requests_to_fill[procdown],
                              procup,   requested_ids[procup]);
    }

   // and finally assign the partitioning.
   // note we are iterating in exactly the same order
   // used to build up the request, so we can expect the
   // required entries to be in the proper sequence.
   elem_it  = mesh.active_elements_begin();
   elem_end = mesh.active_elements_end();      
   
   for (std::vector<unsigned int> counters(libMesh::n_processors(), 0);  
        elem_it != elem_end; ++elem_it)
     {  
       Elem *elem = *elem_it;
       
       const unsigned int current_pid = elem->processor_id();
       
       libmesh_assert (counters[current_pid] < requested_ids[current_pid].size());
       
       const unsigned int elem_procid = 
         requested_ids[current_pid][counters[current_pid]++];
       
       libmesh_assert (elem_procid < static_cast<unsigned int>(_nparts));
       elem->processor_id() = elem_procid;
     }
}
 

void ParmetisPartitioner::build_graph (const MeshBase &mesh) [private] Build the graph.

Definition at line 437 of file parmetis_partitioner.C.

References _adjncy, _global_index_by_pid_map, _vtxdist, _xadj, Elem::active(), Elem::active_family_tree(), MeshBase::active_local_elements_begin(), MeshBase::active_local_elements_end(), DofObject::id(), MeshBase::n_active_local_elem(), Elem::n_neighbors(), Elem::neighbor(), libMesh::processor_id(), and Elem::which_neighbor_am_i().

Referenced by _do_repartition().

{
  // build the graph in distributed CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  const unsigned int n_active_local_elem  = mesh.n_active_local_elem();
    
  std::vector<const Elem*> neighbors_offspring;
  
  std::vector<std::vector<unsigned int> > graph(n_active_local_elem);
  unsigned int graph_size=0;

  const unsigned int first_local_elem = _vtxdist[libMesh::processor_id()];
  
  MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end(); 
  
  for (; elem_it != elem_end; ++elem_it)
    {
      const Elem* elem = *elem_it;
        
      libmesh_assert (_global_index_by_pid_map.count(elem->id()));
      const unsigned int global_index_by_pid = 
        _global_index_by_pid_map[elem->id()];   
        
      const unsigned int local_index =
        global_index_by_pid - first_local_elem;
      libmesh_assert (local_index < n_active_local_elem);

      std::vector<unsigned int> &graph_row = graph[local_index];

      // Loop over the element's neighbors.  An element
      // adjacency corresponds to a face neighbor
      for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
        {
          const Elem* neighbor = elem->neighbor(ms);
          
          if (neighbor != NULL)
            {  
              // If the neighbor is active treat it
              // as a connection
              if (neighbor->active())
                {
                  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
                  const unsigned int neighbor_global_index_by_pid =
                    _global_index_by_pid_map[neighbor->id()];
                  
                  graph_row.push_back(neighbor_global_index_by_pid);
                  graph_size++;
                }
  
#ifdef LIBMESH_ENABLE_AMR
              
              // Otherwise we need to find all of the
              // neighbor's children that are connected to
              // us and add them
              else
                {
                  // The side of the neighbor to which
                  // we are connected
                  const unsigned int ns =
                    neighbor->which_neighbor_am_i (elem);
                  libmesh_assert (ns < neighbor->n_neighbors());
                  
                  // Get all the active children (& grandchildren, etc...)
                  // of the neighbor.
                  neighbor->active_family_tree (neighbors_offspring);
                  
                  // Get all the neighbor's children that
                  // live on that side and are thus connected
                  // to us
                  for (unsigned int nc=0; nc<neighbors_offspring.size(); nc++)
                    {
                      const Elem* child =
                        neighbors_offspring[nc];
                      
                      // This does not assume a level-1 mesh.
                      // Note that since children have sides numbered
                      // coincident with the parent, this is a sufficient test.
                      if (child->neighbor(ns) == elem)
                        {
                          libmesh_assert (child->active());
                          libmesh_assert (_global_index_by_pid_map.count(child->id()));
                          const unsigned int child_global_index_by_pid =
                            _global_index_by_pid_map[child->id()];

                          graph_row.push_back(child_global_index_by_pid);
                          graph_size++;
                        }
                    }
                }

#endif /* ifdef LIBMESH_ENABLE_AMR */


            }
        }
    }

  // Reserve space in the adjacency array
  _xadj.clear();
  _xadj.reserve (n_active_local_elem + 1);
  _adjncy.clear();
  _adjncy.reserve (graph_size);

  for (unsigned int r=0; r<graph.size(); r++)
    {
      _xadj.push_back(_adjncy.size());
      std::vector<unsigned int> graph_row; // build this empty
      graph_row.swap(graph[r]); // this will deallocate at the end of scope
      _adjncy.insert(_adjncy.end(),
                     graph_row.begin(),
                     graph_row.end());
    }
  
  // The end of the adjacency array for the last elem
  _xadj.push_back(_adjncy.size());
  
  libmesh_assert (_xadj.size() == n_active_local_elem+1);
  libmesh_assert (_adjncy.size() == graph_size);
}
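
To make the distributed CSR layout concrete, here is a small self-contained check (an illustration only, not libMesh code) for a chain of three face-adjacent elements 0-1-2 held on one processor:

#include <cassert>

int main ()
{
  // Element r's neighbors occupy adjncy[xadj[r] .. xadj[r+1]).
  const int xadj[]   = {0, 1, 3, 4}; // n_active_local_elem + 1 row offsets
  const int adjncy[] = {1, 0, 2, 1}; // concatenated neighbor lists

  assert (xadj[2] - xadj[1] == 2);                 // element 1 has two neighbors,
  assert (adjncy[xadj[1]] == 0 && adjncy[2] == 2); // namely elements 0 and 2
  return 0;
}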
 

virtual AutoPtr<Partitioner> ParmetisPartitioner::clone () const [inline, virtual] Creates a new partitioner of this type and returns it in an AutoPtr.

Implements Partitioner.

Definition at line 55 of file parmetis_partitioner.h.

References ParmetisPartitioner().

  {
    AutoPtr<Partitioner> cloned_partitioner
      (new ParmetisPartitioner());
    return cloned_partitioner;
  }
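
A hedged note on intended use: clone() copies a partitioner polymorphically, e.g. when a caller needs its own Partitioner instance without knowing the concrete type (the partitioner and mesh below are hypothetical):

  AutoPtr<Partitioner> copy = partitioner.clone();
  copy->partition (mesh);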
 

void ParmetisPartitioner::initialize (const MeshBase &mesh, const unsigned int n_sbdmns) [private] Initialize data structures.

Definition at line 228 of file parmetis_partitioner.C.

References _edgecut, _global_index_by_pid_map, _n_active_elem_on_proc, _ncon, _nparts, _numflag, _options, _part, _tpwgts, _ubvec, _vtxdist, _vwgt, _wgtflag, MeshBase::active_elements_begin(), MeshBase::active_elements_end(), MeshBase::active_local_elements_begin(), MeshBase::active_local_elements_end(), MeshBase::active_pid_elements_begin(), MeshBase::active_pid_elements_end(), MeshTools::bounding_box(), MeshCommunication::find_global_indices(), DofObject::id(), std::min(), MeshBase::n_active_local_elem(), Elem::n_nodes(), libMesh::n_processors(), and libMesh::processor_id().

Referenced by _do_repartition().

{
  const unsigned int n_active_local_elem = mesh.n_active_local_elem();

  // Set parameters.
  _wgtflag = 2;                          // weights on vertices only
  _ncon    = 1;                          // one weight per vertex
  _numflag = 0;                          // C-style 0-based numbering
  _nparts  = static_cast<int>(n_sbdmns); // number of subdomains to create
  _edgecut = 0;                          // the number of edges cut by the
                                         //   partition

  // Initialize data structures for ParMETIS
  _vtxdist.resize (libMesh::n_processors()+1); std::fill (_vtxdist.begin(), _vtxdist.end(), 0);
  _tpwgts.resize  (_nparts);                   std::fill (_tpwgts.begin(),  _tpwgts.end(),  1./_nparts);
  _ubvec.resize   (_ncon);                     std::fill (_ubvec.begin(),   _ubvec.end(),   1.);
  _part.resize    (n_active_local_elem);       std::fill (_part.begin(),    _part.end(), 0);
  _options.resize (5);
  _vwgt.resize    (n_active_local_elem);
  
  // Set the options
  _options[0] = 1;  // don't use default options
  _options[1] = 0;  // default (level of timing)
  _options[2] = 15; // random seed (default)
  _options[3] = 2;  // processor distribution and subdomain distribution are decoupled
  
  // Find the number of active elements on each processor.  We cannot use
  // mesh.n_active_elem_on_proc(pid) since that only returns the number of
  // elements assigned to pid which are currently stored on the calling 
  // processor. This will not in general be correct for parallel meshes
  // when (pid!=libMesh::processor_id()).
  _n_active_elem_on_proc.resize(libMesh::n_processors());
  Parallel::allgather(n_active_local_elem, _n_active_elem_on_proc);

  // count the total number of active elements in the mesh.  Note we cannot
  // use mesh.n_active_elem() in general since this only returns the number
  // of active elements which are stored on the calling processor.
  // We should not use n_active_elem for any allocation because that will
  // be inherently unscalable, but it can be useful for libmesh_assertions.
  unsigned int n_active_elem=0;

  // Set up the vtxdist array.  This will be the same on each processor.
  // ***** Consult the Parmetis documentation. *****
  libmesh_assert (_vtxdist.size() == libMesh::n_processors()+1);
  libmesh_assert (_vtxdist[0] == 0);
  
  for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
    {
      _vtxdist[pid+1] = _vtxdist[pid] + _n_active_elem_on_proc[pid];
      n_active_elem += _n_active_elem_on_proc[pid];
    }
  libmesh_assert (_vtxdist.back() == static_cast<int>(n_active_elem));

  // ParMetis expects the elements to be numbered in contiguous blocks
  // by processor, i.e. [0, ne0), [ne0, ne0+ne1), ...
  // Since we only partition active elements we should have no expectation
  // that we currently have such a distribution.  So we need to create it.
  // Also, at the same time we are going to map all the active elements into a globally
  // unique range [0,n_active_elem) which is *independent* of the current partitioning.
  // This can be fed to ParMetis as the initial partitioning of the subdomains (decoupled
  // from the partitioning of the objects themselves).  This allows us to get the same
  // resultant partitioning independent of the input partitioning.
  MeshTools::BoundingBox bbox =
    MeshTools::bounding_box(mesh);

  _global_index_by_pid_map.clear();

  // Maps active element ids into a contiguous range independent of partitioning.
  // (only needs local scope)
  std::map<unsigned int, unsigned int> global_index_map;

  {
    std::vector<unsigned int> global_index;

    // create the mapping which is contiguous by processor 
    for (unsigned int pid=0, pid_offset=0; pid<libMesh::n_processors(); pid++)
      {
        MeshBase::const_element_iterator       it  = mesh.active_pid_elements_begin(pid);
        const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid); 
        
        // note that we may not have all (or any!) the active elements which belong on this processor,
        // but by calling this on all processors a unique range in [0,_n_active_elem_on_proc[pid])
        // is constructed.  Only the indices for the elements we pass in are returned in the array.
        MeshCommunication().find_global_indices (bbox, it, end, 
                                                 global_index);
        
        for (unsigned int cnt=0; it != end; ++it)
          {
            const Elem *elem = *it;
            libmesh_assert (!_global_index_by_pid_map.count(elem->id()));
            libmesh_assert (cnt < global_index.size());
            libmesh_assert (global_index[cnt] < _n_active_elem_on_proc[pid]);

            _global_index_by_pid_map[elem->id()]  = global_index[cnt++] + pid_offset;
          }

        pid_offset += _n_active_elem_on_proc[pid];
      }

    // create the unique mapping for all active elements independent of partitioning
    {
      MeshBase::const_element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::const_element_iterator end = mesh.active_elements_end(); 
        
      // By calling this on all processors, a unique range in [0,n_active_elem) is constructed.  
      // Only the indices for the elements we pass in are returned in the array.
      MeshCommunication().find_global_indices (bbox, it, end, 
                                               global_index);
      
      for (unsigned int cnt=0; it != end; ++it)
        {
          const Elem *elem = *it;
          libmesh_assert (!global_index_map.count(elem->id()));
          libmesh_assert (cnt < global_index.size());
          libmesh_assert (global_index[cnt] < n_active_elem);

          global_index_map[elem->id()]  = global_index[cnt++];
        }
      }
    // really, shouldn't be close!
    libmesh_assert (global_index_map.size() <= n_active_elem);
    libmesh_assert (_global_index_by_pid_map.size() <= n_active_elem);

    // At this point the two maps should be the same size.  If they are not
    // then the number of active elements is not the same as the sum over all 
    // processors of the number of active elements per processor, which means
    // there must be some unpartitioned objects out there.
    if (global_index_map.size() != _global_index_by_pid_map.size())
      {
        std::cerr << "ERROR:  ParmetisPartitioner cannot handle unpartitioned objects!"
                  << std::endl;
        libmesh_error();
      }    
  }

  // Finally, we need to initialize the vertex (partition) weights and the initial subdomain
  // mapping.  The subdomain mapping will be independent of the processor mapping, and is 
  // defined by a simple mapping of the global indices we just found.
  {
    std::vector<unsigned int> subdomain_bounds(libMesh::n_processors());

    const unsigned int first_local_elem = _vtxdist[libMesh::processor_id()];

    for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
      {
        unsigned int tgt_subdomain_size = 0;
        
        // watch out for the case that n_subdomains < n_processors
        if (pid < static_cast<unsigned int>(_nparts))
          {     
            tgt_subdomain_size =
              n_active_elem/std::min(libMesh::n_processors(),
                                     static_cast<unsigned int>(_nparts));
            
            if (pid < n_active_elem%_nparts)
              tgt_subdomain_size++;
          }
        if (pid == 0)
          subdomain_bounds[0] = tgt_subdomain_size;
        else
          subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
      }

    libmesh_assert (subdomain_bounds.back() == n_active_elem);

    MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();
    
    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem *elem = *elem_it;
        
        libmesh_assert (_global_index_by_pid_map.count(elem->id()));
        const unsigned int global_index_by_pid = 
          _global_index_by_pid_map[elem->id()]; 
        libmesh_assert (global_index_by_pid < n_active_elem);
        
        const unsigned int local_index =
          global_index_by_pid - first_local_elem;
        
        libmesh_assert (local_index < n_active_local_elem);
        libmesh_assert (local_index < _vwgt.size());
        
        // TODO:[BSK] maybe there is a better weight?
        _vwgt[local_index] = elem->n_nodes();
        
        // find the subdomain this element belongs in
        libmesh_assert (global_index_map.count(elem->id()));
        const unsigned int global_index =
          global_index_map[elem->id()];
        
        libmesh_assert (global_index < subdomain_bounds.back());
        
        const unsigned int subdomain_id =
          std::distance(subdomain_bounds.begin(),
                        std::lower_bound(subdomain_bounds.begin(),
                                         subdomain_bounds.end(),
                                         global_index));
        libmesh_assert (subdomain_id < static_cast<unsigned int>(_nparts));
        libmesh_assert (local_index < _part.size());
          
        _part[local_index] = subdomain_id;
      }    
  }
}
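
As an illustration of the _vtxdist layout assembled above (a toy check, not libMesh code): with active element counts {4, 3, 5} on three processors, processor p owns the contiguous global index range [_vtxdist[p], _vtxdist[p+1]).

#include <cassert>
#include <vector>

int main ()
{
  const unsigned int n_on_proc[] = {4, 3, 5};

  std::vector<int> vtxdist (4, 0);
  for (unsigned int pid=0; pid<3; pid++)
    vtxdist[pid+1] = vtxdist[pid] + n_on_proc[pid];

  // vtxdist = [0, 4, 7, 12]; e.g. processor 1 owns global indices [4, 7).
  assert (vtxdist[1] == 4 && vtxdist[2] == 7 && vtxdist[3] == 12);
  return 0;
}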
 

void Partitioner::partition (MeshBase &mesh, const unsigned int n = libMesh::n_processors()) [inherited] Partition the MeshBase into n parts. If the user does not specify a number of pieces into which the mesh should be partitioned, then the default behavior of the partitioner is to partition according to the number of processors defined in libMesh::n_processors(). The partitioner currently does not modify the subdomain_id of each element. This number is reserved for things like material properties, etc.

Definition at line 43 of file partitioner.C.

References Partitioner::_do_partition(), MeshBase::is_serial(), std::min(), MeshBase::n_active_elem(), Partitioner::partition_unpartitioned_elements(), MeshBase::set_n_partitions(), Partitioner::set_node_processor_ids(), Partitioner::set_parent_processor_ids(), and Partitioner::single_partition().

Referenced by SFCPartitioner::_do_partition(), MetisPartitioner::_do_partition(), and _do_repartition().

{
  // BSK - temporary fix while redistribution is integrated 6/26/2008
  // Uncomment this to not repartition in parallel
   if (!mesh.is_serial())
     return;

  // we cannot partition into more pieces than we have
  // active elements!
  const unsigned int n_parts =
    std::min(mesh.n_active_elem(), n);
  
  // Set the number of partitions in the mesh
  mesh.set_n_partitions()=n_parts;

  if (n_parts == 1)
    {
      this->single_partition (mesh);
      return;
    }
  
  // First assign a temporary partitioning to any unpartitioned elements
  Partitioner::partition_unpartitioned_elements(mesh, n_parts);
  
  // Call the partitioning function
  this->_do_partition(mesh,n_parts);

  // Set the parent's processor ids
  Partitioner::set_parent_processor_ids(mesh);

  // Set the node's processor ids
  Partitioner::set_node_processor_ids(mesh);
}
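
A hedged example of the n parameter (the mesh is assumed to be built elsewhere); partition() clamps the request to the number of active elements, as shown above:

  ParmetisPartitioner partitioner;
  partitioner.partition (mesh, 4); // four subdomains, regardless of n_processors()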
 

void Partitioner::partition_unpartitioned_elements (MeshBase &mesh, const unsigned int n_subdomains = libMesh::n_processors()) [static, inherited] This function assigns a temporary partitioning to any currently unpartitioned elements, distributing them in roughly equal contiguous blocks across the subdomains.

Definition at line 139 of file partitioner.C.

References MeshTools::bounding_box(), MeshCommunication::find_global_indices(), MeshTools::n_elem(), libMesh::n_processors(), DofObject::processor_id(), MeshBase::unpartitioned_elements_begin(), and MeshBase::unpartitioned_elements_end().

Referenced by Partitioner::partition(), and Partitioner::repartition().

{
  MeshBase::const_element_iterator       it  = mesh.unpartitioned_elements_begin();
  const MeshBase::const_element_iterator end = mesh.unpartitioned_elements_end();

  const unsigned int n_unpartitioned_elements = MeshTools::n_elem (it, end);

  // the unpartitioned elements must exist on all processors. If the range is empty on one
  // it is empty on all, and we can quit right here.
  if (!n_unpartitioned_elements) return;
         
  // find the target subdomain sizes
  std::vector<unsigned int> subdomain_bounds(libMesh::n_processors());

  for (unsigned int pid=0; pid<libMesh::n_processors(); pid++)
    {
      unsigned int tgt_subdomain_size = 0;

      // watch out for the case that n_subdomains < n_processors
      if (pid < n_subdomains)
        {
          tgt_subdomain_size = n_unpartitioned_elements/n_subdomains;
      
          if (pid < n_unpartitioned_elements%n_subdomains)
            tgt_subdomain_size++;

        }
      
      //std::cout << "pid, #= " << pid << ", " << tgt_subdomain_size << std::endl;
      if (pid == 0)
        subdomain_bounds[0] = tgt_subdomain_size;
      else
        subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
    }
  
  libmesh_assert (subdomain_bounds.back() == n_unpartitioned_elements);  
  
  // create the unique mapping for all unpartitioned elements independent of partitioning
  // determine the global indexing for all the unpartitioned elements
  std::vector<unsigned int> global_indices;
    
  // By calling this on all processors, a unique range in [0,n_unpartitioned_elements) is constructed.  
  // Only the indices for the elements we pass in are returned in the array.
  MeshCommunication().find_global_indices (MeshTools::bounding_box(mesh), it, end, 
                                           global_indices);
  
  for (unsigned int cnt=0; it != end; ++it)
    {
      Elem *elem = *it;
      
      libmesh_assert (cnt < global_indices.size());
      const unsigned int global_index =
        global_indices[cnt++];
      
      libmesh_assert (global_index < subdomain_bounds.back());
      libmesh_assert (global_index < n_unpartitioned_elements);

      const unsigned int subdomain_id =
        std::distance(subdomain_bounds.begin(),
                      std::upper_bound(subdomain_bounds.begin(),
                                       subdomain_bounds.end(),
                                       global_index));
      libmesh_assert (subdomain_id < n_subdomains);
     
      elem->processor_id() = subdomain_id;              
      //std::cout << "assigning " << global_index << " to " << subdomain_id << std::endl;             
    }
}
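
The subdomain_bounds lookup above maps a global index to the first subdomain whose cumulative bound exceeds it. A standalone toy check (illustration only):

#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

int main ()
{
  // Cumulative bounds {3, 6, 9} split nine elements into subdomains
  // {0..2}, {3..5}, {6..8}.
  std::vector<unsigned int> bounds;
  bounds.push_back (3);
  bounds.push_back (6);
  bounds.push_back (9);

  const unsigned int global_index = 4;
  const unsigned int subdomain_id =
    std::distance (bounds.begin(),
                   std::upper_bound (bounds.begin(), bounds.end(),
                                     global_index));

  assert (subdomain_id == 1); // index 4 falls in the second block
  return 0;
}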
 

void Partitioner::repartition (MeshBase &mesh, const unsigned int n = libMesh::n_processors()) [inherited] Repartitions the MeshBase into n parts. This is required since some partitioning algorithms can repartition more efficiently than computing a new partitioning from scratch. The default behavior is to simply call this->partition(n).

Definition at line 82 of file partitioner.C.

References Partitioner::_do_repartition(), std::min(), MeshBase::n_active_elem(), Partitioner::partition_unpartitioned_elements(), MeshBase::set_n_partitions(), Partitioner::set_node_processor_ids(), Partitioner::set_parent_processor_ids(), and Partitioner::single_partition().

{
  // we cannot partition into more pieces than we have
  // active elements!
  const unsigned int n_parts =
    std::min(mesh.n_active_elem(), n);
  
  // Set the number of partitions in the mesh
  mesh.set_n_partitions()=n_parts;

  if (n_parts == 1)
    {
      this->single_partition (mesh);
      return;
    }
  
  // First assign a temporary partitioning to any unpartitioned elements
  Partitioner::partition_unpartitioned_elements(mesh, n_parts);
  
  // Call the partitioning function
  this->_do_repartition(mesh,n_parts);
  
  // Set the parent's processor ids
  Partitioner::set_parent_processor_ids(mesh);
  
  // Set the node's processor ids
  Partitioner::set_node_processor_ids(mesh);
}
 

void Partitioner::set_node_processor_ids (MeshBase &mesh) [static, inherited] This function is called after partitioning to set the processor IDs for the nodes. By definition, a Node's processor ID is the minimum processor ID for all of the elements which share the node.

Definition at line 353 of file partitioner.C.

References MeshBase::active_elements_begin(), MeshBase::active_elements_end(), MeshBase::elements_begin(), MeshBase::elements_end(), Elem::get_node(), DofObject::id(), DofObject::invalid_processor_id, DofObject::invalidate_processor_id(), std::min(), MeshTools::n_elem(), Elem::n_nodes(), MeshBase::n_partitions(), libMesh::n_processors(), MeshBase::node_ptr(), MeshBase::nodes_begin(), MeshBase::nodes_end(), MeshBase::not_active_elements_begin(), MeshBase::not_active_elements_end(), libMesh::processor_id(), DofObject::processor_id(), MeshBase::subactive_elements_begin(), MeshBase::subactive_elements_end(), MeshBase::unpartitioned_elements_begin(), and MeshBase::unpartitioned_elements_end().

Referenced by Partitioner::partition(), XdrIO::read(), Partitioner::repartition(), and BoundaryInfo::sync().

{
  START_LOG("set_node_processor_ids()","Partitioner");

  // This function must be run on all processors at once
  parallel_only();

  // If we have any unpartitioned elements at this 
  // stage there is a problem
  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                            mesh.unpartitioned_elements_end()) == 0);


//   const unsigned int orig_n_local_nodes = mesh.n_local_nodes();

//   std::cerr << "[" << libMesh::processor_id() << "]: orig_n_local_nodes="
//          << orig_n_local_nodes << std::endl;

  // Build up request sets.  Each node is currently owned by a processor because
  // it is connected to an element owned by that processor.  However, during the
  // repartitioning phase that element may have been assigned a new processor id, but
  // it is still resident on the original processor.  We need to know where to look
  // for new ids before assigning new ids, otherwise we may be asking the wrong processors
  // for the wrong information.
  //
  // The only remaining issue is what to do with unpartitioned nodes.  Since they are required
  // to live on all processors we can simply rely on ourselves to number them properly.
  std::vector<std::vector<unsigned int> >
    requested_node_ids(libMesh::n_processors());

  // Loop over all the nodes, count the ones on each processor.  We can skip ourself
  std::vector<unsigned int> ghost_nodes_from_proc(libMesh::n_processors(), 0);

  MeshBase::node_iterator       node_it  = mesh.nodes_begin();
  const MeshBase::node_iterator node_end = mesh.nodes_end();
  
  for (; node_it != node_end; ++node_it)
    {
      Node *node = *node_it;
      libmesh_assert(node);
      const unsigned int current_pid = node->processor_id();
      if (current_pid != libMesh::processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert(current_pid < ghost_nodes_from_proc.size());
          ghost_nodes_from_proc[current_pid]++;
        }
    }

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (unsigned int pid=0; pid != libMesh::n_processors(); ++pid)
    requested_node_ids[pid].reserve(ghost_nodes_from_proc[pid]);

  // We need to get the new pid for each node from the processor
  // which *currently* owns the node.  We can safely skip ourself
  for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
    {
      Node *node = *node_it;
      libmesh_assert(node);
      const unsigned int current_pid = node->processor_id();      
      if (current_pid != libMesh::processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert(current_pid < requested_node_ids.size());
          libmesh_assert(requested_node_ids[current_pid].size() <
                 ghost_nodes_from_proc[current_pid]);
          requested_node_ids[current_pid].push_back(node->id());
        }
      
      // Unset any previously-set node processor ids
      node->invalidate_processor_id();
    }
  
  // Loop over all the active elements
  MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
  const MeshBase::element_iterator elem_end = mesh.active_elements_end(); 
  
  for ( ; elem_it != elem_end; ++elem_it)
    {
      Elem* elem = *elem_it;
      libmesh_assert(elem);

      libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);
      
      // For each node, set the processor ID to the min of
      // its current value and this Element's processor id.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->get_node(n)->processor_id() = std::min(elem->get_node(n)->processor_id(),
                                                     elem->processor_id());
    }

  // And loop over the subactive elements, but don't reassign
  // nodes that are already active on another processor.
  MeshBase::element_iterator       sub_it  = mesh.subactive_elements_begin();
  const MeshBase::element_iterator sub_end = mesh.subactive_elements_end(); 
  
  for ( ; sub_it != sub_end; ++sub_it)
    {
      Elem* elem = *sub_it;
      libmesh_assert(elem);

      libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);
      
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->get_node(n)->processor_id() == DofObject::invalid_processor_id)
          elem->get_node(n)->processor_id() = elem->processor_id();
    }

  // Same for the inactive elements -- we will have already gotten most of these
  // nodes, *except* for the case of a parent with a subset of children which are
  // ghost elements.  In that case some of the parent nodes will not have been
  // properly handled yet
  MeshBase::element_iterator       not_it  = mesh.not_active_elements_begin();
  const MeshBase::element_iterator not_end = mesh.not_active_elements_end(); 
  
  for ( ; not_it != not_end; ++not_it)
    {
      Elem* elem = *not_it;
      libmesh_assert(elem);

      libmesh_assert (elem->processor_id() != DofObject::invalid_processor_id);
      
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->get_node(n)->processor_id() == DofObject::invalid_processor_id)
          elem->get_node(n)->processor_id() = elem->processor_id();
    }

#ifndef NDEBUG
  {
    // make sure all the nodes connected to any element have received a
    // valid processor id
    std::set<const Node*> used_nodes;
    MeshBase::element_iterator       all_it  = mesh.elements_begin();
    const MeshBase::element_iterator all_end = mesh.elements_end(); 
  
    for ( ; all_it != all_end; ++all_it)
      {
        Elem* elem = *all_it;
        libmesh_assert(elem);
        libmesh_assert(elem->processor_id() != DofObject::invalid_processor_id);
        for (unsigned int n=0; n<elem->n_nodes(); ++n)
          used_nodes.insert(elem->get_node(n));
      }

    for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
      {
        Node *node = *node_it;
        libmesh_assert(node);
        libmesh_assert(used_nodes.count(node));
        libmesh_assert(node->processor_id() != DofObject::invalid_processor_id);
      }
  }
#endif

  // Next set node ids from other processors, excluding self
  for (unsigned int p=1; p != libMesh::n_processors(); ++p)
    {
      // Trade my requests with processor procup and procdown
      unsigned int procup = (libMesh::processor_id() + p) %
                             libMesh::n_processors();
      unsigned int procdown = (libMesh::n_processors() +
                               libMesh::processor_id() - p) %
                               libMesh::n_processors();
      std::vector<unsigned int> request_to_fill;
      Parallel::send_receive(procup, requested_node_ids[procup],
                             procdown, request_to_fill);

      // Fill those requests in-place
      for (unsigned int i=0; i != request_to_fill.size(); ++i)
        {
          Node *node = mesh.node_ptr(request_to_fill[i]);
          libmesh_assert(node);
          const unsigned int new_pid = node->processor_id();
          libmesh_assert (new_pid != DofObject::invalid_processor_id);
          // Note this is the correct test -- the number of partitions
          // may not equal the number of processors.
          libmesh_assert (new_pid < mesh.n_partitions());
          request_to_fill[i] = new_pid;
        }

      // Trade back the results
      std::vector<unsigned int> filled_request;
      Parallel::send_receive(procdown, request_to_fill,
                             procup,   filled_request);
      libmesh_assert(filled_request.size() == requested_node_ids[procup].size());
      
      // And copy the id changes we've now been informed of
      for (unsigned int i=0; i != filled_request.size(); ++i)
        {
          Node *node = mesh.node_ptr(requested_node_ids[procup][i]);
          libmesh_assert(node);
          // Again, the number of partitions may not equal the
          // number of processors.
          libmesh_assert(filled_request[i] < mesh.n_partitions());
          node->processor_id(filled_request[i]);
        }
    }
  
  STOP_LOG("set_node_processor_ids()","Partitioner");
}
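
The procup/procdown indices above implement a ring exchange: on pass p each processor sends to (rank + p) % P and receives from (rank - p + P) % P, so every ordered pair of processors trades exactly once. A toy check of the arithmetic (illustration only):

#include <cassert>

int main ()
{
  const unsigned int P = 4, rank = 1; // hypothetical communicator size and rank

  for (unsigned int p = 1; p != P; ++p)
    {
      const unsigned int procup   = (rank + p) % P;
      const unsigned int procdown = (P + rank - p) % P;

      // procdown is exactly the rank that has *us* as its procup on pass p.
      assert ((procdown + p) % P == rank);
      assert (procup < P && procdown < P);
    }
  return 0;
}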
 

void Partitioner::set_parent_processor_ids (MeshBase &mesh) [static, inherited] This function is called after partitioning to set the processor IDs for the inactive parent elements. A parent's processor ID is set to the minimum processor ID of its children.

Definition at line 211 of file partitioner.C.

References MeshBase::active_elements_begin(), MeshBase::active_elements_end(), MeshBase::ancestor_elements_begin(), MeshBase::ancestor_elements_end(), Elem::child(), Partitioner::communication_blocksize, DofObject::id(), DofObject::invalid_processor_id, DofObject::invalidate_processor_id(), Elem::is_remote(), MeshBase::is_serial(), MeshBase::max_elem_id(), std::min(), Elem::n_children(), MeshTools::n_elem(), Elem::parent(), DofObject::processor_id(), MeshBase::unpartitioned_elements_begin(), and MeshBase::unpartitioned_elements_end().

Referenced by Partitioner::partition(), and Partitioner::repartition().

{
  START_LOG("set_parent_processor_ids()","Partitioner");
  
  // If the mesh is serial we have access to all the elements,
  // in particular all the active ones.  We can therefore set
  // the parent processor ids indirectly through their children.
  // By convention a parent is assigned to the minimum processor
  // of all its children.
  if (mesh.is_serial())
    {
      // Loop over all the active elements in the mesh  
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();
      
      for ( ; it!=end; ++it)
        {
#ifdef LIBMESH_ENABLE_AMR
          Elem *child  = *it;
          Elem *parent = child->parent();

          while (parent)
            {
              // invalidate the parent id, otherwise the min below
              // will not work if the current parent id is less
              // than all the children!
              parent->invalidate_processor_id();
              
              for(unsigned int c=0; c<parent->n_children(); c++)
                {
                  child = parent->child(c);
                  libmesh_assert(child);
                  libmesh_assert(!child->is_remote());
                  libmesh_assert(child->processor_id() != DofObject::invalid_processor_id);
                  parent->processor_id() = std::min(parent->processor_id(),
                                                    child->processor_id());
                }             
              parent = parent->parent();
            }
#else
          libmesh_assert ((*it)->level() == 0);
#endif
          
        }
    }

  // When the mesh is parallel we cannot guarantee that parents have access to
  // all their children.
  else
    {
      // We will use a brute-force approach here.  Each processor finds its parent
      // elements and sets the parent pid to the minimum of its local children.
      // A global reduction is then performed to make sure the true minimum is found.
      // As noted, this is required because we cannot guarantee that a parent has
      // access to all its children on any single processor.
      parallel_only();
      libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                               mesh.unpartitioned_elements_end()) == 0);

      const unsigned int max_elem_id = mesh.max_elem_id();

      std::vector<unsigned short int>
        parent_processor_ids (std::min(communication_blocksize,
                                       max_elem_id));
      
      for (unsigned int blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
        {
          const unsigned int first_elem_id = blk*communication_blocksize;
          last_elem_id = std::min((blk+1)*communication_blocksize, max_elem_id);

          std::fill (parent_processor_ids.begin(),
                     parent_processor_ids.end(),
                     DofObject::invalid_processor_id);

          // first build up local contributions to parent_processor_ids
          MeshBase::element_iterator       not_it  = mesh.ancestor_elements_begin();
          const MeshBase::element_iterator not_end = mesh.ancestor_elements_end(); 

          bool have_parent_in_block = false;
          
          for ( ; not_it != not_end; ++not_it)
            {
#ifdef LIBMESH_ENABLE_AMR
              Elem *parent = *not_it;

              const unsigned int parent_idx = parent->id();
              libmesh_assert (parent_idx < max_elem_id);

              if ((parent_idx >= first_elem_id) &&
                  (parent_idx <  last_elem_id))
                {
                  have_parent_in_block = true;
                  unsigned short int parent_pid = DofObject::invalid_processor_id;

                  for (unsigned int c=0; c<parent->n_children(); c++)
                    parent_pid = std::min (parent_pid, parent->child(c)->processor_id());
                  
                  const unsigned int packed_idx = parent_idx - first_elem_id;
                  libmesh_assert (packed_idx < parent_processor_ids.size());

                  parent_processor_ids[packed_idx] = parent_pid;
                }
#else
              // without AMR there should be no inactive elements
              libmesh_error();
#endif
            }

          // then find the global minimum
          Parallel::min (parent_processor_ids);

          // and assign the ids, if we have a parent in this block.
          if (have_parent_in_block)
            for (not_it = mesh.ancestor_elements_begin();
                 not_it != not_end; ++not_it)
              {
                Elem *parent = *not_it;
                
                const unsigned int parent_idx = parent->id();
                
                if ((parent_idx >= first_elem_id) &&
                    (parent_idx <  last_elem_id))
                  {
                    const unsigned int packed_idx = parent_idx - first_elem_id;
                    libmesh_assert (packed_idx < parent_processor_ids.size());
                    
                    const unsigned short int parent_pid =
                      parent_processor_ids[packed_idx];
                    
                    libmesh_assert (parent_pid != DofObject::invalid_processor_id);
                    
                    parent->processor_id() = parent_pid;
                  }
              }
        }
    }
  
  STOP_LOG("set_parent_processor_ids()","Partitioner");
}
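
The blocked loop above walks the element id space in chunks of communication_blocksize so that no single reduction exceeds that vector length. A toy check of the block arithmetic (illustration only, with a smaller blocksize):

#include <algorithm>
#include <cassert>

int main ()
{
  const unsigned int blocksize = 1000, max_elem_id = 2500;
  unsigned int n_blocks = 0;

  for (unsigned int blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
    {
      const unsigned int first_elem_id = blk*blocksize;
      last_elem_id = std::min ((blk+1)*blocksize, max_elem_id);

      assert (last_elem_id - first_elem_id <= blocksize);
      ++n_blocks;
    }

  assert (n_blocks == 3); // blocks [0,1000), [1000,2000), [2000,2500)
  return 0;
}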
 

void Partitioner::single_partition (MeshBase &mesh) [protected, inherited] Trivially "partitions" the mesh for one processor. Simply loops through the elements and assigns all of them to processor 0. It is provided as a separate function so that derived classes may use it without reimplementing it.

Definition at line 116 of file partitioner.C.

References MeshBase::elements_begin(), MeshBase::elements_end(), MeshBase::nodes_begin(), and MeshBase::nodes_end().

Referenced by SFCPartitioner::_do_partition(), MetisPartitioner::_do_partition(), LinearPartitioner::_do_partition(), CentroidPartitioner::_do_partition(), _do_repartition(), Partitioner::partition(), and Partitioner::repartition().

{
  START_LOG("single_partition()","Partitioner");
  
  // Loop over all the elements and assign them to processor 0.
  MeshBase::element_iterator       elem_it  = mesh.elements_begin();
  const MeshBase::element_iterator elem_end = mesh.elements_end(); 

  for ( ; elem_it != elem_end; ++elem_it)
    (*elem_it)->processor_id() = 0;

  // For a single partition, all the nodes are on processor 0
  MeshBase::node_iterator       node_it  = mesh.nodes_begin();
  const MeshBase::node_iterator node_end = mesh.nodes_end();
  
  for ( ; node_it != node_end; ++node_it)
    (*node_it)->processor_id() = 0;

  STOP_LOG("single_partition()","Partitioner");
}
 

Member Data Documentation

 

std::vector<int> ParmetisPartitioner::_adjncy [private]

Definition at line 118 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and build_graph().  

int ParmetisPartitioner::_edgecut [private]

Definition at line 129 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

std::map<unsigned int, unsigned int> ParmetisPartitioner::_global_index_by_pid_map [private] Maps active element ids into a contiguous range, as needed by ParMETIS.

Definition at line 110 of file parmetis_partitioner.h.

Referenced by assign_partitioning(), build_graph(), and initialize().  

std::vector<unsigned int> ParmetisPartitioner::_n_active_elem_on_proc [private] The number of active elements on each processor. Note that ParMETIS requires that each processor have some active elements; it will abort if any processor passes a NULL _part array.

Definition at line 105 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

int ParmetisPartitioner::_ncon [private]

Definition at line 126 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

int ParmetisPartitioner::_nparts [private]

Definition at line 128 of file parmetis_partitioner.h.

Referenced by _do_repartition(), assign_partitioning(), and initialize().  

int ParmetisPartitioner::_numflag [private]

Definition at line 127 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

std::vector<int> ParmetisPartitioner::_options [private]

Definition at line 122 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

std::vector<int> ParmetisPartitioner::_part [private]

Definition at line 119 of file parmetis_partitioner.h.

Referenced by _do_repartition(), assign_partitioning(), and initialize().  

std::vector<float> ParmetisPartitioner::_tpwgts [private]

Definition at line 120 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

std::vector<float> ParmetisPartitioner::_ubvec [private]

Definition at line 121 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

std::vector<int> ParmetisPartitioner::_vtxdist [private] Data structures used by ParMETIS to describe the connectivity graph of the mesh. Consult the ParMETIS documentation.

Definition at line 116 of file parmetis_partitioner.h.

Referenced by _do_repartition(), assign_partitioning(), build_graph(), and initialize().  

std::vector<int> ParmetisPartitioner::_vwgt [private]

Definition at line 123 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

int ParmetisPartitioner::_wgtflag [private]

Definition at line 125 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and initialize().  

std::vector<int> ParmetisPartitioner::_xadj [private]

Definition at line 117 of file parmetis_partitioner.h.

Referenced by _do_repartition(), and build_graph().  

const unsigned int Partitioner::communication_blocksize = 1000000 [static, protected, inherited] The blocksize to use when doing blocked parallel communication. This limits the maximum vector size which can be used in a single communication step.

Definition at line 140 of file partitioner.h.

Referenced by Partitioner::set_parent_processor_ids().

 

Author

Generated automatically by Doxygen for libMesh from the source code.


 
