
Commit 2a345ea (parent: db0d4e9)
Committed Feb 27, 2020

Add GhostEntity for cell partitionType(). Remove the unused seperateOverlapAndGhostCells() helper.

6 files changed: +30 −122 lines

opm/grid/common/GridPartitioning.cpp (+14 −108)

@@ -242,97 +242,6 @@ namespace Dune
         }
     }
 
-    /// \brief seperate overlap and ghost cells
-    void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
-                                      std::vector<int>& part_type, int layers)
-    {
-
-        auto lid = grid.localIdSet();
-        auto gid = grid.globalIdSet();
-        part_type.resize(grid.numCells(), 0);
-
-        for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
-            auto elem = *it;
-
-            if (elem.partitionType() == InteriorEntity) {
-                auto id = lid.id(elem);
-
-                part_type[id] = 1;
-
-                for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
-                    auto inter = *iit;
-                    if ( inter.neighbor() ) {
-                        auto nab = inter.outside();
-                        int nab_lid = lid.id(nab);
-                        if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
-                            int nab_gid = gid.id(nab);
-
-                            if ( cell_has_well[nab_gid] == 1 ) {
-                                part_type[nab_lid] = layers + 1;
-                            }
-                            else {
-                                part_type[nab_lid] = 2;
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        int layer = 2;
-        while (layer < layers + 1) {
-            for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
-
-                auto elem = *it;
-                int id = lid.id(elem);
-                bool isLayer = part_type[id] == layer || part_type[id] == layers + 1;
-
-                if (elem.partitionType() != InteriorEntity && isLayer) {
-                    for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
-
-                        auto inter = *iit;
-                        if ( inter.neighbor() ) {
-
-                            auto nab = inter.outside();
-                            int nab_gid = gid.id(nab);
-                            int nab_lid = lid.id(nab);
-
-                            if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
-                                if (cell_has_well[nab_gid] == 1) {
-                                    part_type[nab_lid] = layers + 1;
-                                }
-                                else {
-                                    part_type[nab_lid] = layer + 1;
-                                }
-                            }
-                        }
-                    }
-                }
-                /*
-                else if (elem.partitionType() != InteriorEntity && part_type[id] == layers + 1) {
-                    int gid_e = gid.id(elem);
-                    bool isWell = cell_has_well[gid_e] == 1;
-                    if ( isWell ) {
-                        for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
-                            auto inter = *iit;
-                            if ( inter.neighbor() ) {
-
-                                auto nab = inter.outside();
-                                int nab_gid = gid.id(nab);
-                                int nab_lid = lid.id(nab);
-                                if (nab.partitionType() != InteriorEntity) {
-                                    part_type[nab_lid] = layers + 1;
-                                }
-                            }
-                        }
-                    }
-                }
-                */
-            }
-            layer++;
-        }
-    }
-
     /// \brief Adds cells to the overlap that just share a point with an owner cell.
     void addOverlapCornerCell(const CpGrid& grid, int owner,
                               const CpGrid::Codim<0>::Entity& from,
@@ -499,9 +408,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
     auto ownerSize = exportList.size();
     const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
     std::map<int,int> exportProcs, importProcs;
-
-    std::vector<std::tuple<int,int,char>> ghostList;
-
+
     for (CpGrid::Codim<0>::LeafIterator it = grid.leafbegin<0>();
          it != grid.leafend<0>(); ++it) {
         int index = ix.index(*it);
@@ -544,7 +451,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
     // Remove duplicate cells in overlap layer.
     auto newEnd = std::unique(ownerEnd, exportList.end(), overlapEqual);
     exportList.resize(newEnd - exportList.begin());
-
+
     for(const auto& entry: importList)
         importProcs.insert(std::make_pair(std::get<1>(entry), 0));
     //count entries to send
@@ -596,24 +503,24 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
 
     MPI_Waitall(requests.size(), requests.data(), statuses.data());
 
-    // -------------- Communicate overlap type
+    // Communicate overlap type
     ++tag;
     std::vector<std::vector<int> > typeBuffers(importProcs.size());
-    auto tbuffer = typeBuffers.begin();
+    auto partitionTypeBuffer = typeBuffers.begin();
     req = requests.begin();
 
     for(auto&& proc: importProcs)
     {
-        tbuffer->resize(proc.second);
-        MPI_Irecv(tbuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
-        ++req; ++tbuffer;
+        partitionTypeBuffer->resize(proc.second);
+        MPI_Irecv(partitionTypeBuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
+        ++req; ++partitionTypeBuffer;
     }
 
     for(const auto& proc: exportProcs)
     {
         std::vector<int> sendBuffer;
         sendBuffer.reserve(proc.second);
-
+
         for (auto t = ownerEnd; t != exportList.end(); ++t) {
             if ( std::get<1>(*t) == proc.first ) {
                 if ( std::get<2>(*t) == AttributeSet::copy)
@@ -622,21 +529,20 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
                     sendBuffer.push_back(1);
             }
         }
-
         MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
     }
 
     std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
     MPI_Waitall(requests.size(), requests.data(), statuses.data());
-    // ------------------------------
-
+
+    // Add the overlap layer to the import list on each process.
     buffer = receiveBuffers.begin();
-    tbuffer = typeBuffers.begin();
+    partitionTypeBuffer = typeBuffers.begin();
     auto importOwnerSize = importList.size();
-
+
     for(const auto& proc: importProcs)
     {
-        auto pt = tbuffer->begin();
+        auto pt = partitionTypeBuffer->begin();
         for(const auto& index: *buffer) {
 
             if (*pt == 0) {
@@ -648,7 +554,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
             ++pt;
         }
         ++buffer;
-        ++tbuffer;
+        ++partitionTypeBuffer;
     }
     std::sort(importList.begin() + importOwnerSize, importList.end(),
               [](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>& t2)
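The retained "Communicate overlap type" block above encodes each exported overlap cell as an integer flag (0 when its attribute is AttributeSet::copy, 1 otherwise), posts one MPI_Irecv per import process, and answers with one MPI_Send per export process. The following standalone sketch shows just that encode/exchange/decode pattern; the two-rank setup, tag value, and buffer contents are illustrative assumptions, not code from this commit.

// Sketch of the flag exchange: 0 encodes AttributeSet::copy (becomes a ghost
// cell on the importing rank), 1 encodes an ordinary overlap cell.
// Run with: mpirun -np 2 ./a.out
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int tag = 42;                      // illustrative message tag
    std::vector<int> typeBuffer(3);          // one flag per shared cell

    if (rank == 0) {                         // "export" side
        std::vector<int> sendBuffer{0, 1, 0};  // copy, overlap, copy
        MPI_Send(sendBuffer.data(), static_cast<int>(sendBuffer.size()),
                 MPI_INT, 1, tag, MPI_COMM_WORLD);
    } else if (rank == 1) {                  // "import" side
        MPI_Request req;
        MPI_Irecv(typeBuffer.data(), static_cast<int>(typeBuffer.size()),
                  MPI_INT, 0, tag, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
        for (int flag : typeBuffer)          // decode on the receiving side
            std::printf("cell is %s\n", flag == 0 ? "ghost (copy)" : "overlap");
    }

    MPI_Finalize();
    return 0;
}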

opm/grid/common/GridPartitioning.hpp (+4 −6)

@@ -43,7 +43,6 @@
 #include <tuple>
 
 #include <dune/common/parallel/mpihelper.hh>
-
 namespace Dune
 {
 
@@ -72,9 +71,6 @@ namespace Dune
                        bool recursive = false,
                        bool ensureConnectivity = true);
 
-    void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
-                                      std::vector<int>& part_type, int layers);
-
     /// \brief Adds a layer of overlap cells to a partitioning.
     /// \param[in] grid The grid that is partitioned.
     /// \param[in] cell_part a vector containing each cells partition number.
@@ -96,13 +92,15 @@ namespace Dune
     /// \param[inout] importList List indices to import, each entry is a tuple
     /// of global index, process rank (to import from), attribute here, local
     /// index here
+    /// \param[in] cell_has_well integer list that indicate if cell i is perforated
+    /// by a well.
     /// \param[in] cc The communication object
     /// \param[in] layer Number of overlap layers
     int addOverlapLayer(const CpGrid& grid,
-                         const std::vector<int>& cell_part,
+                        const std::vector<int>& cell_part,
                         std::vector<std::tuple<int,int,char>>& exportList,
                         std::vector<std::tuple<int,int,char,int>>& importList,
-                         const std::vector<int>& cell_has_well,
+                        const std::vector<int>& cell_has_well,
                         const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
                         int layers = 1);
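For orientation, a hypothetical call site for the signature documented above might look as follows. The wrapper function, the include paths, and the use of MPIHelper::getCollectiveCommunication() are best-effort assumptions for illustration, not part of this commit.

// Hypothetical caller; assumes the grid and the partition vector already exist.
#include <tuple>
#include <vector>
#include <opm/grid/CpGrid.hpp>
#include <opm/grid/common/GridPartitioning.hpp>

void addOverlap(const Dune::CpGrid& grid,
                const std::vector<int>& cell_part,
                const std::vector<int>& cell_has_well)   // 1 if cell i is perforated
{
    std::vector<std::tuple<int,int,char>> exportList;       // global id, rank, attribute
    std::vector<std::tuple<int,int,char,int>> importList;   // global id, rank, attribute, local id
    const auto& cc = Dune::MPIHelper::getCollectiveCommunication();
    Dune::addOverlapLayer(grid, cell_part, exportList, importList,
                          cell_has_well, cc, /*layers=*/1);
}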

opm/grid/common/ZoltanPartition.cpp (+2 −2)

@@ -77,7 +77,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
                                wells,
                                transmissibilities,
                                partitionIsEmpty,
-                                edgeWeightsMethod));
+                               edgeWeightsMethod));
         Dune::cpgrid::setCpGridZoltanGraphFunctions(zz, *grid_and_wells,
                                                     partitionIsEmpty);
     }
@@ -104,7 +104,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
     int rank = cc.rank();
     std::vector<int> parts(size, rank);
     std::vector<std::vector<int> > wells_on_proc;
-    // List entry: process to export to, (global) index, attribute there (not needed?)
+    // List entry: (global) index, process to export to, attribute there
     std::vector<std::tuple<int,int,char>> myExportList(numExport);
     // List entry: process to import from, global index, attribute here, local index
     // (determined later)
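The second hunk corrects the documented field order of an export-list entry. A minimal illustration of that ordering (all values made up):

#include <tuple>

int main()
{
    // (global index, process to export to, attribute there), per the corrected comment.
    std::tuple<int,int,char> entry{17, 2, 0};   // illustrative values only
    int globalIndex = std::get<0>(entry);       // 17: the cell's global id
    int destRank    = std::get<1>(entry);       // 2: rank the cell is exported to
    return (globalIndex == 17 && destRank == 2) ? 0 : 1;
}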

opm/grid/common/ZoltanPartition.hpp (+6 −3)

@@ -46,10 +46,13 @@ namespace cpgrid
     /// @param edgeWeightMethod The method used to calculate the weights associated
     /// with the edges of the graph (uniform, transmissibilities, log thereof)
     /// @param root The process number that holds the global grid.
-    /// @return A pair consisting of a vector that contains for each local cell of the grid the
+    /// @return A tuple consisting of a vector that contains for each local cell of the grid the
     /// the number of the process that owns it after repartitioning,
-    /// and a set of names of wells that should be defunct in a parallel
-    /// simulation.
+    /// a set of names of wells that should be defunct in a parallel
+    /// simulation, a vector of tuples exportList(global id, owner rank, attribute)
+    /// containing information each rank needs for distributing the grid and a second
+    /// vector of tuples importList(global id, send rank, attribute, local id) containing
+    /// information about the cells of the grid each rank will receive.
     std::tuple<std::vector<int>,std::unordered_set<std::string>,
                std::vector<std::tuple<int,int,char> >,
                std::vector<std::tuple<int,int,char,int> > >
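The expanded @return documentation describes a four-element tuple. The sketch below shows its shape and how a caller could unpack it with C++17 structured bindings; the ZoltanResult alias and main() are illustrative scaffolding, not library code.

#include <string>
#include <tuple>
#include <unordered_set>
#include <vector>

// Shape of the documented return value of zoltanGraphPartitionGridOnRoot.
using ZoltanResult =
    std::tuple<std::vector<int>,                           // owning rank per local cell
               std::unordered_set<std::string>,            // defunct well names
               std::vector<std::tuple<int,int,char>>,      // exportList
               std::vector<std::tuple<int,int,char,int>>>; // importList

int main()
{
    ZoltanResult result;  // in real code: the value returned by the partitioner
    auto& [parts, defunctWells, exportList, importList] = result;
    (void)parts; (void)defunctWells; (void)exportList; (void)importList;
    return 0;
}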

opm/grid/cpgrid/CpGrid.cpp (+1 −1)

@@ -146,6 +146,7 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellTy
                   << " Maybe scatterGrid was called before?"<<std::endl;
         return std::make_pair(false, std::unordered_set<std::string>());
     }
+
 #if HAVE_MPI
     auto& cc = data_->ccobj_;
 
@@ -214,7 +215,6 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellTy
                    return std::get<0>(t1) < std::get<0>(t2);
                };
 
-
     if ( ! ownersFirst )
     {
         // merge owner and overlap sorted by global index

opm/grid/cpgrid/CpGridData.cpp (+3 −2)

@@ -1574,9 +1574,10 @@ void CpGridData::distributeGlobalGrid(CpGrid& grid,
     partition_type_indicator_->cell_indicator_.resize(cell_indexset_.size());
     for(const auto i: cell_indexset_)
     {
+        auto ci_attr = i.local().attribute();
         partition_type_indicator_->cell_indicator_[i.local()]=
-            i.local().attribute()==AttributeSet::owner?
-            InteriorEntity:OverlapEntity;
+            ci_attr==AttributeSet::owner?
+            InteriorEntity: ci_attr==AttributeSet::copy? GhostEntity:OverlapEntity;
     }
 
     // Compute partition type for points