decompositionMethod.C
1 /*---------------------------------------------------------------------------*\
2   =========                 |
3   \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
4    \\    /   O peration     | Website:  https://openfoam.org
5     \\  /    A nd           | Copyright (C) 2011-2022 OpenFOAM Foundation
6      \\/     M anipulation  |
7 -------------------------------------------------------------------------------
8 License
9  This file is part of OpenFOAM.
10 
11  OpenFOAM is free software: you can redistribute it and/or modify it
12  under the terms of the GNU General Public License as published by
13  the Free Software Foundation, either version 3 of the License, or
14  (at your option) any later version.
15 
16  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19  for more details.
20 
21  You should have received a copy of the GNU General Public License
22  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 
24 \*---------------------------------------------------------------------------*/
25 
26 #include "decompositionMethod.H"
27 #include "Time.H"
28 #include "globalIndex.H"
29 #include "syncTools.H"
30 #include "Tuple2.H"
31 #include "faceSet.H"
32 #include "regionSplit.H"
33 #include "localPointRegion.H"
34 #include "minData.H"
35 #include "FaceCellWave.H"
36 
41 
42 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
43 
44 namespace Foam
45 {
46     defineTypeNameAndDebug(decompositionMethod, 0);
47     defineRunTimeSelectionTable(decompositionMethod, decomposer);
48     defineRunTimeSelectionTable(decompositionMethod, distributor);
49 }
50 
51 
52 // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
53 
54 Foam::decompositionMethod::decompositionMethod
55 (
56  const dictionary& decompositionDict
57 )
58 :
59  decompositionDict_(decompositionDict),
60  nProcessors_
61  (
62  decompositionDict.lookup<label>("numberOfSubdomains")
63  )
64 {
65  // Read any constraints
66  wordList constraintTypes_;
67 
68  if (decompositionDict_.found("constraints"))
69  {
70  const dictionary& constraintsList = decompositionDict_.subDict
71  (
72  "constraints"
73  );
74  forAllConstIter(dictionary, constraintsList, iter)
75  {
76  const dictionary& dict = iter().dict();
77 
78  constraintTypes_.append(dict.lookup("type"));
79 
80  constraints_.append
81  (
82             decompositionConstraint::New
83             (
84  dict,
85  constraintTypes_.last()
86  )
87  );
88  }
89  }
90 }
91 
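// A minimal decomposeParDict sketch matching what the constructor above
// reads; the method and constraint names are only examples of available
// types, not a recommendation:
//
//     numberOfSubdomains  4;
//
//     decomposer          hierarchical;
//
//     constraints
//     {
//         baffles
//         {
//             type    preserveBaffles;
//         }
//     }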
92 
93 // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
94 
95 Foam::autoPtr<Foam::decompositionMethod>
96 Foam::decompositionMethod::NewDecomposer
97 (
98  const dictionary& decompositionDict
99 )
100 {
101  const word methodType
102  (
103  decompositionDict.lookupBackwardsCompatible<word>
104  (
105  {"decomposer", "method"}
106  )
107  );
108 
109  Info<< "Selecting decomposer " << methodType << endl;
110 
111  libs.open
112  (
113  decompositionDict,
114  "libs",
115  decomposerConstructorTablePtr_
116  );
117 
118  decomposerConstructorTable::iterator cstrIter =
119  decomposerConstructorTablePtr_->find(methodType);
120 
121  if (cstrIter == decomposerConstructorTablePtr_->end())
122  {
123         FatalErrorInFunction
124             << "Unknown decomposer "
125  << methodType << nl << nl
126  << "Valid decomposers are : " << endl
127  << decomposerConstructorTablePtr_->sortedToc()
128  << exit(FatalError);
129  }
130 
131  return autoPtr<decompositionMethod>(cstrIter()(decompositionDict));
132 }
133 
134 
135 Foam::autoPtr<Foam::decompositionMethod>
136 Foam::decompositionMethod::NewDistributor
137 (
138  const dictionary& distributionDict
139 )
140 {
141  const word methodType
142  (
143  distributionDict.lookupBackwardsCompatible<word>
144  (
145  {"distributor", "method"}
146  )
147  );
148 
149  Info<< "Selecting distributor " << methodType << endl;
150 
151  libs.open
152  (
153  distributionDict,
154  "libs",
155  distributorConstructorTablePtr_
156  );
157 
158  distributorConstructorTable::iterator cstrIter =
159  distributorConstructorTablePtr_->find(methodType);
160 
161  if (cstrIter == distributorConstructorTablePtr_->end())
162  {
163         FatalErrorInFunction
164             << "Unknown distributor "
165  << methodType << nl << nl
166  << "Valid distributors are : " << endl
167  << distributorConstructorTablePtr_->sortedToc()
168  << exit(FatalError);
169  }
170 
171  return autoPtr<decompositionMethod>(cstrIter()(distributionDict));
172 }
173 
174 
175 Foam::dictionary Foam::decompositionMethod::decomposeParDict(const Time& time)
176 {
177  return IOdictionary
178  (
179  IOobject
180  (
181  "decomposeParDict",
182  time.system(),
183  time,
184             IOobject::MUST_READ,
185             IOobject::NO_WRITE,
186             false
187  )
188  );
189 }
190 
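// A minimal usage sketch of the two helpers above, assuming a Time object
// named runTime exists in the calling code:
//
//     const dictionary decompDict
//     (
//         decompositionMethod::decomposeParDict(runTime)
//     );
//
//     autoPtr<decompositionMethod> decomposer
//     (
//         decompositionMethod::NewDecomposer(decompDict)
//     );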
191 
192 Foam::labelList Foam::decompositionMethod::decompose
193 (
194  const polyMesh& mesh,
195  const pointField& points
196 )
197 {
198  scalarField weights(points.size(), 1.0);
199 
200  return decompose(mesh, points, weights);
201 }
202 
203 
204 Foam::labelList Foam::decompositionMethod::decompose
205 (
206  const polyMesh& mesh,
207  const labelList& fineToCoarse,
208  const pointField& coarsePoints,
209  const scalarField& coarseWeights
210 )
211 {
212  CompactListList<label> coarseCellCells;
213  calcCellCells
214  (
215  mesh,
216  fineToCoarse,
217  coarsePoints.size(),
218  true, // use global cell labels
219  coarseCellCells
220  );
221 
222  // Decompose based on agglomerated points
223  labelList coarseDistribution
224  (
225  decompose
226  (
227  coarseCellCells.list(),
228  coarsePoints,
229  coarseWeights
230  )
231  );
232 
233  // Rework back into a decomposition for the original (fine) mesh
234  labelList fineDistribution(fineToCoarse.size());
235 
236  forAll(fineDistribution, i)
237  {
238  fineDistribution[i] = coarseDistribution[fineToCoarse[i]];
239  }
240 
241  return fineDistribution;
242 }
243 
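// Worked example of the fine-to-coarse mapping above: with
//
//     fineToCoarse       = (0 0 1 1 2)    // 5 fine cells, 3 coarse cells
//     coarseDistribution = (0 1 1)
//
// every fine cell inherits the processor of its coarse cell, giving
//
//     fineDistribution   = (0 0 1 1 1)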
244 
245 Foam::labelList Foam::decompositionMethod::decompose
246 (
247  const polyMesh& mesh,
248  const labelList& fineToCoarse,
249  const pointField& coarsePoints
250 )
251 {
252  scalarField cWeights(coarsePoints.size(), 1.0);
253 
254  return decompose
255  (
256  mesh,
257  fineToCoarse,
258  coarsePoints,
259  cWeights
260  );
261 }
262 
263 
264 Foam::labelList Foam::decompositionMethod::decompose
265 (
266  const labelListList& globalCellCells,
267  const pointField& cc
268 )
269 {
270  scalarField cWeights(cc.size(), 1.0);
271 
272  return decompose(globalCellCells, cc, cWeights);
273 }
274 
275 
276 void Foam::decompositionMethod::calcCellCells
277 (
278  const polyMesh& mesh,
279  const labelList& agglom,
280  const label nLocalCoarse,
281  const bool parallel,
282  CompactListList<label>& cellCells
283 )
284 {
285  const labelList& faceOwner = mesh.faceOwner();
286  const labelList& faceNeighbour = mesh.faceNeighbour();
287  const polyBoundaryMesh& patches = mesh.boundaryMesh();
288 
289 
290  // Create global cell numbers
291  // ~~~~~~~~~~~~~~~~~~~~~~~~~~
292 
293  globalIndex globalAgglom
294  (
295  nLocalCoarse,
296  Pstream::msgType(),
297  Pstream::worldComm,
298  parallel
299  );
300 
301 
302  // Get agglomerate owner on other side of coupled faces
303  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
304 
305  labelList globalNeighbour(mesh.nFaces() - mesh.nInternalFaces());
306 
306 
307  forAll(patches, patchi)
308  {
309  const polyPatch& pp = patches[patchi];
310 
311  if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
312  {
313  label facei = pp.start();
314  label bFacei = pp.start() - mesh.nInternalFaces();
315 
316  forAll(pp, i)
317  {
318  globalNeighbour[bFacei] = globalAgglom.toGlobal
319  (
320  agglom[faceOwner[facei]]
321  );
322 
323  bFacei++;
324  facei++;
325  }
326  }
327  }
328 
329  // Get the cell on the other side of coupled patches
330  syncTools::swapBoundaryFaceList(mesh, globalNeighbour);
331 
332 
333  // Count number of faces (internal + coupled)
334  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
335 
336  // Number of faces per coarse cell
337  labelList nFacesPerCell(nLocalCoarse, 0);
338 
339  for (label facei = 0; facei < mesh.nInternalFaces(); facei++)
340  {
341  label own = agglom[faceOwner[facei]];
342  label nei = agglom[faceNeighbour[facei]];
343 
344  nFacesPerCell[own]++;
345  nFacesPerCell[nei]++;
346  }
347 
348  forAll(patches, patchi)
349  {
350  const polyPatch& pp = patches[patchi];
351 
352  if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
353  {
354  label facei = pp.start();
355  label bFacei = pp.start() - mesh.nInternalFaces();
356 
357  forAll(pp, i)
358  {
359  label own = agglom[faceOwner[facei]];
360 
361  label globalNei = globalNeighbour[bFacei];
362  if
363  (
364  !globalAgglom.isLocal(globalNei)
365  || globalAgglom.toLocal(globalNei) != own
366  )
367  {
368  nFacesPerCell[own]++;
369  }
370 
371  facei++;
372  bFacei++;
373  }
374  }
375  }
376 
377 
378  // Fill in offset and data
379  // ~~~~~~~~~~~~~~~~~~~~~~~
380 
381  cellCells.setSize(nFacesPerCell);
382 
383  nFacesPerCell = 0;
384 
385  labelUList& m = cellCells.m();
386  const labelList& offsets = cellCells.offsets();
387 
388  // For internal faces it is just the offsetted owner and neighbour
389  for (label facei = 0; facei < mesh.nInternalFaces(); facei++)
390  {
391  label own = agglom[faceOwner[facei]];
392  label nei = agglom[faceNeighbour[facei]];
393 
394  m[offsets[own] + nFacesPerCell[own]++] = globalAgglom.toGlobal(nei);
395  m[offsets[nei] + nFacesPerCell[nei]++] = globalAgglom.toGlobal(own);
396  }
397 
398  // For boundary faces it is the offsetted coupled neighbour
399  forAll(patches, patchi)
400  {
401  const polyPatch& pp = patches[patchi];
402 
403  if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
404  {
405  label facei = pp.start();
406  label bFacei = pp.start() - mesh.nInternalFaces();
407 
408  forAll(pp, i)
409  {
410  label own = agglom[faceOwner[facei]];
411 
412  label globalNei = globalNeighbour[bFacei];
413 
414  if
415  (
416  !globalAgglom.isLocal(globalNei)
417  || globalAgglom.toLocal(globalNei) != own
418  )
419  {
420  m[offsets[own] + nFacesPerCell[own]++] = globalNei;
421  }
422 
423  facei++;
424  bFacei++;
425  }
426  }
427  }
428 
429 
430  // Check for duplicate connections between cells
431  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
432  // Done as a postprocessing step since we now have cellCells.
433  label newIndex = 0;
434  labelHashSet nbrCells;
435 
436 
437  if (cellCells.size() == 0)
438  {
439  return;
440  }
441 
442  label startIndex = cellCells.offsets()[0];
443 
444  forAll(cellCells, celli)
445  {
446  nbrCells.clear();
447  nbrCells.insert(globalAgglom.toGlobal(celli));
448 
449  label endIndex = cellCells.offsets()[celli+1];
450 
451  for (label i = startIndex; i < endIndex; i++)
452  {
453  if (nbrCells.insert(cellCells.m()[i]))
454  {
455  cellCells.m()[newIndex++] = cellCells.m()[i];
456  }
457  }
458  startIndex = endIndex;
459  cellCells.offsets()[celli+1] = newIndex;
460  }
461 
462  cellCells.setSize(cellCells.size(), newIndex);
463 }
464 
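// Sketch of the CompactListList layout built above (compressed-row
// storage): offsets() has nLocalCoarse+1 entries and m() holds the global
// neighbour labels of coarse cell i in the range
// [offsets()[i], offsets()[i+1]). For a serial 1D chain of three coarse
// cells 0-1-2 this would give
//
//     offsets() = (0 1 3 4)
//     m()       = (1   0 2   1)
//
// i.e. cell 0 sees {1}, cell 1 sees {0 2} and cell 2 sees {1}.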
465 
466 void Foam::decompositionMethod::calcCellCells
467 (
468  const polyMesh& mesh,
469  const labelList& agglom,
470  const label nLocalCoarse,
471  const bool parallel,
472  CompactListList<label>& cellCells,
473  CompactListList<scalar>& cellCellWeights
474 )
475 {
476  const labelList& faceOwner = mesh.faceOwner();
477  const labelList& faceNeighbour = mesh.faceNeighbour();
478  const polyBoundaryMesh& patches = mesh.boundaryMesh();
479 
480 
481  // Create global cell numbers
482  // ~~~~~~~~~~~~~~~~~~~~~~~~~~
483 
484  globalIndex globalAgglom
485  (
486  nLocalCoarse,
487  Pstream::msgType(),
488  Pstream::worldComm,
489  parallel
490  );
491 
492 
493  // Get agglomerate owner on other side of coupled faces
494  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
495 
496  labelList globalNeighbour(mesh.nFaces() - mesh.nInternalFaces());
497 
498  forAll(patches, patchi)
499  {
500  const polyPatch& pp = patches[patchi];
501 
502  if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
503  {
504  label faceI = pp.start();
505  label bFaceI = pp.start() - mesh.nInternalFaces();
506 
507  forAll(pp, i)
508  {
509  globalNeighbour[bFaceI] = globalAgglom.toGlobal
510  (
511  agglom[faceOwner[faceI]]
512  );
513 
514  bFaceI++;
515  faceI++;
516  }
517  }
518  }
519 
520  // Get the cell on the other side of coupled patches
521  syncTools::swapBoundaryFaceList(mesh, globalNeighbour);
522 
523 
524  // Count number of faces (internal + coupled)
525  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
526 
527  // Number of faces per coarse cell
528  labelList nFacesPerCell(nLocalCoarse, 0);
529 
530  for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
531  {
532  label own = agglom[faceOwner[faceI]];
533  label nei = agglom[faceNeighbour[faceI]];
534 
535  nFacesPerCell[own]++;
536  nFacesPerCell[nei]++;
537  }
538 
539  forAll(patches, patchi)
540  {
541  const polyPatch& pp = patches[patchi];
542 
543  if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
544  {
545  label faceI = pp.start();
546  label bFaceI = pp.start()-mesh.nInternalFaces();
547 
548  forAll(pp, i)
549  {
550  label own = agglom[faceOwner[faceI]];
551 
552  label globalNei = globalNeighbour[bFaceI];
553  if
554  (
555  !globalAgglom.isLocal(globalNei)
556  || globalAgglom.toLocal(globalNei) != own
557  )
558  {
559  nFacesPerCell[own]++;
560  }
561 
562  faceI++;
563  bFaceI++;
564  }
565  }
566  }
567 
568 
569  // Fill in offset and data
570  // ~~~~~~~~~~~~~~~~~~~~~~~
571 
572  cellCells.setSize(nFacesPerCell);
573  cellCellWeights.setSize(nFacesPerCell);
574 
575  nFacesPerCell = 0;
576 
577  labelUList& m = cellCells.m();
578  scalarUList& w = cellCellWeights.m();
579  const labelList& offsets = cellCells.offsets();
580 
581  // For internal faces it is just the offsetted owner and neighbour
582  for (label faceI = 0; faceI < mesh.nInternalFaces(); faceI++)
583  {
584  label own = agglom[faceOwner[faceI]];
585  label nei = agglom[faceNeighbour[faceI]];
586 
587  label ownIndex = offsets[own] + nFacesPerCell[own]++;
588  label neiIndex = offsets[nei] + nFacesPerCell[nei]++;
589 
590  m[ownIndex] = globalAgglom.toGlobal(nei);
591  w[ownIndex] = mesh.magFaceAreas()[faceI];
592  m[neiIndex] = globalAgglom.toGlobal(own);
593  w[neiIndex] = mesh.magFaceAreas()[faceI];
594  }
595 
596  // For boundary faces it is the offsetted coupled neighbour
597  forAll(patches, patchi)
598  {
599  const polyPatch& pp = patches[patchi];
600 
601  if (pp.coupled() && (parallel || !isA<processorPolyPatch>(pp)))
602  {
603  label faceI = pp.start();
604  label bFaceI = pp.start()-mesh.nInternalFaces();
605 
606  forAll(pp, i)
607  {
608  label own = agglom[faceOwner[faceI]];
609 
610  label globalNei = globalNeighbour[bFaceI];
611 
612  if
613  (
614  !globalAgglom.isLocal(globalNei)
615  || globalAgglom.toLocal(globalNei) != own
616  )
617  {
618  label ownIndex = offsets[own] + nFacesPerCell[own]++;
619  m[ownIndex] = globalNei;
620  w[ownIndex] = mesh.magFaceAreas()[faceI];
621  }
622 
623  faceI++;
624  bFaceI++;
625  }
626  }
627  }
628 
629 
630  // Check for duplicate connections between cells
631  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
632  // Done as a postprocessing step since we now have cellCells.
633  label newIndex = 0;
634  labelHashSet nbrCells;
635 
636 
637  if (cellCells.size() == 0)
638  {
639  return;
640  }
641 
642  label startIndex = cellCells.offsets()[0];
643 
644  forAll(cellCells, cellI)
645  {
646  nbrCells.clear();
647  nbrCells.insert(globalAgglom.toGlobal(cellI));
648 
649  label endIndex = cellCells.offsets()[cellI+1];
650 
651  for (label i = startIndex; i < endIndex; i++)
652  {
653  if (nbrCells.insert(cellCells.m()[i]))
654  {
655  cellCells.m()[newIndex] = cellCells.m()[i];
656  cellCellWeights.m()[newIndex] = cellCellWeights.m()[i];
657  newIndex++;
658  }
659  }
660  startIndex = endIndex;
661  cellCells.offsets()[cellI+1] = newIndex;
662  cellCellWeights.offsets()[cellI+1] = newIndex;
663  }
664 
665  cellCells.setSize(cellCells.size(), newIndex);
666  cellCellWeights.setSize(cellCells.size(), newIndex);
667 }
668 
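// The weighted overload fills cellCellWeights in step with cellCells:
// w[k] holds the magnitude of the face area shared by the cell pair stored
// at m[k]. For the serial three-cell chain sketched after the unweighted
// overload (shared face areas A01 and A12):
//
//     m() = (1    0    2    1  )
//     w() = (A01  A01  A12  A12)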
669 
670 Foam::labelList Foam::decompositionMethod::decompose
671 (
672  const polyMesh& mesh,
673  const scalarField& cellWeights,
674 
675  //- Whether owner and neighbour should be on same processor
676  // (takes priority over explicitConnections)
677  const boolList& blockedFace,
678 
679  //- Whether whole sets of faces (and point neighbours) need to be kept
680  // on single processor
681  const PtrList<labelList>& specifiedProcessorFaces,
682  const labelList& specifiedProcessor,
683 
684  //- Additional connections between boundary faces
685  const List<labelPair>& explicitConnections
686 )
687 {
688  // Any weights specified?
689  label nWeights = returnReduce(cellWeights.size(), sumOp<label>());
690 
691  // Any processor sets?
692  label nProcSets = 0;
693  forAll(specifiedProcessorFaces, setI)
694  {
695  nProcSets += specifiedProcessorFaces[setI].size();
696  }
697  reduce(nProcSets, sumOp<label>());
698 
699  // Any non-mesh connections?
700  label nConnections = returnReduce
701  (
702  explicitConnections.size(),
703  sumOp<label>()
704  );
705 
706  // Any faces not blocked?
707  label nUnblocked = 0;
708  forAll(blockedFace, facei)
709  {
710  if (!blockedFace[facei])
711  {
712  nUnblocked++;
713  }
714  }
715  reduce(nUnblocked, sumOp<label>());
716 
717 
718  // Either do decomposition on cell centres or on agglomeration
719 
720  labelList finalDecomp;
721 
722 
723  if (nProcSets+nConnections+nUnblocked == 0)
724  {
725  // No constraints, possibly weights
726 
727  if (nWeights > 0)
728  {
729  finalDecomp = decompose
730  (
731  mesh,
732  mesh.cellCentres(),
733  cellWeights
734  );
735  }
736  else
737  {
738  finalDecomp = decompose(mesh, mesh.cellCentres());
739  }
740  }
741  else
742  {
743  if (debug)
744  {
745  Info<< "Constrained decomposition:" << endl
746  << " faces with same owner and neighbour processor : "
747  << nUnblocked << endl
748  << " baffle faces with same owner processor : "
749  << nConnections << endl
750  << " faces all on same processor : "
751  << nProcSets << endl << endl;
752  }
753 
754  // Determine local regions, separated by blockedFaces
755  regionSplit localRegion(mesh, blockedFace, explicitConnections, false);
756 
757 
758  if (debug)
759  {
760  Info<< "Constrained decomposition:" << endl
761  << " split into " << localRegion.nLocalRegions()
762  << " regions."
763  << endl;
764  }
765 
766  // Determine region cell centres
767  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
768 
769  // This just takes the first cell in the region. Averaging would be a
770  // problem with cyclics - the averaged region centre might end up
771  // somewhere in the middle of the domain, nowhere near any of the
772  // region's cells.
773 
774  pointField regionCentres(localRegion.nLocalRegions(), point::max);
775 
776  forAll(localRegion, celli)
777  {
778  label regionI = localRegion[celli];
779 
780  if (regionCentres[regionI] == point::max)
781  {
782  regionCentres[regionI] = mesh.cellCentres()[celli];
783  }
784  }
785 
786  // Do decomposition on agglomeration
787  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
788 
789  scalarField regionWeights(localRegion.nLocalRegions(), 0);
790 
791  if (nWeights > 0)
792  {
793  forAll(localRegion, celli)
794  {
795  label regionI = localRegion[celli];
796 
797  regionWeights[regionI] += cellWeights[celli];
798  }
799  }
800  else
801  {
802  forAll(localRegion, celli)
803  {
804  label regionI = localRegion[celli];
805 
806  regionWeights[regionI] += 1.0;
807  }
808  }
809 
810  finalDecomp = decompose
811  (
812  mesh,
813  localRegion,
814  regionCentres,
815  regionWeights
816  );
817 
818 
819 
820  // Implement the explicitConnections since above decompose
821  // does not know about them
822  forAll(explicitConnections, i)
823  {
824  const labelPair& baffle = explicitConnections[i];
825  label f0 = baffle.first();
826  label f1 = baffle.second();
827 
828  if (!blockedFace[f0] && !blockedFace[f1])
829  {
830  // Note: what if the faces are internal and the owner and neighbour
831  // end up on different processors? For now just push the
832  // owner-side processor onto both.
833 
834  const label proci = finalDecomp[mesh.faceOwner()[f0]];
835 
836  finalDecomp[mesh.faceOwner()[f1]] = proci;
837  if (mesh.isInternalFace(f1))
838  {
839  finalDecomp[mesh.faceNeighbour()[f1]] = proci;
840  }
841  }
842  else if (blockedFace[f0] != blockedFace[f1])
843  {
844  FatalErrorInFunction
845  << "On explicit connection between faces " << f0
846  << " and " << f1
847  << " the two blockedFace status are not equal : "
848  << blockedFace[f0] << " and " << blockedFace[f1]
849  << exit(FatalError);
850  }
851  }
852 
853 
854  // blockedFaces corresponding to processor faces need to be handled
855  // separately since they are not handled by the local regionSplit.
856  // We now need to walk across coupled faces and make sure to move
857  // a whole global region across.
858  if (Pstream::parRun())
859  {
860  // Re-do regionSplit
861 
862  // Field on cells and faces.
863  List<minData> cellData(mesh.nCells());
864  List<minData> faceData(mesh.nFaces());
865 
866  // Take over blockedFaces by seeding a negative number
867  // (so it is always less than the decomposition)
868  label nUnblocked = 0;
869  forAll(blockedFace, facei)
870  {
871  if (blockedFace[facei])
872  {
873  faceData[facei] = minData(-123);
874  }
875  else
876  {
877  nUnblocked++;
878  }
879  }
880 
881  // Seed unblocked faces with destination processor
882  labelList seedFaces(nUnblocked);
883  List<minData> seedData(nUnblocked);
884  nUnblocked = 0;
885 
886  forAll(blockedFace, facei)
887  {
888  if (!blockedFace[facei])
889  {
890  label own = mesh.faceOwner()[facei];
891  seedFaces[nUnblocked] = facei;
892  seedData[nUnblocked] = minData(finalDecomp[own]);
893  nUnblocked++;
894  }
895  }
896 
897 
898  // Propagate information inwards
899  FaceCellWave<minData> deltaCalc
900  (
901  mesh,
902  seedFaces,
903  seedData,
904  faceData,
905  cellData,
906  mesh.globalData().nTotalCells()+1
907  );
908 
909  // And extract
910  forAll(finalDecomp, celli)
911  {
912  if (cellData[celli].valid(deltaCalc.data()))
913  {
914  finalDecomp[celli] = cellData[celli].data();
915  }
916  }
917  }
918 
919 
920  // For specifiedProcessorFaces rework the cellToProc to enforce
921  // all on one processor since we can't guarantee that the input
922  // to regionSplit was a single region.
923  // E.g. faceSet 'a' with the cells split into two regions
924  // by a notch formed by two walls
925  //
926  //          \   /
927  //           \ /
928  //    ---a----+-----a-----
929  //
930  //
931  // Note that reworking the cellToProc might make the decomposition
932  // unbalanced.
933  forAll(specifiedProcessorFaces, setI)
934  {
935  const labelList& set = specifiedProcessorFaces[setI];
936 
937  label proci = specifiedProcessor[setI];
938  if (proci == -1)
939  {
940  // If no processor specified use the one from the
941  // 0th element
942  proci = finalDecomp[mesh.faceOwner()[set[0]]];
943  }
944 
945  forAll(set, fI)
946  {
947  const face& f = mesh.faces()[set[fI]];
948  forAll(f, fp)
949  {
950  const labelList& pFaces = mesh.pointFaces()[f[fp]];
951  forAll(pFaces, i)
952  {
953  label facei = pFaces[i];
954 
955  finalDecomp[mesh.faceOwner()[facei]] = proci;
956  if (mesh.isInternalFace(facei))
957  {
958  finalDecomp[mesh.faceNeighbour()[facei]] = proci;
959  }
960  }
961  }
962  }
963  }
964 
965 
966  if (debug && Pstream::parRun())
967  {
968  labelList nbrDecomp;
969  syncTools::swapBoundaryCellList(mesh, finalDecomp, nbrDecomp);
970 
971  const polyBoundaryMesh& patches = mesh.boundaryMesh();
972  forAll(patches, patchi)
973  {
974  const polyPatch& pp = patches[patchi];
975  if (pp.coupled())
976  {
977  forAll(pp, i)
978  {
979  label facei = pp.start()+i;
980  label own = mesh.faceOwner()[facei];
981  label bFacei = facei-mesh.nInternalFaces();
982 
983  if (!blockedFace[facei])
984  {
985  label ownProc = finalDecomp[own];
986  label nbrProc = nbrDecomp[bFacei];
987  if (ownProc != nbrProc)
988  {
989  FatalErrorInFunction
990  << "patch:" << pp.name()
991  << " face:" << facei
992  << " at:" << mesh.faceCentres()[facei]
993  << " ownProc:" << ownProc
994  << " nbrProc:" << nbrProc
995  << exit(FatalError);
996  }
997  }
998  }
999  }
1000  }
1001  }
1002  }
1003 
1004  return finalDecomp;
1005 }
1006 
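// A minimal sketch of calling the constrained overload above directly,
// assuming only one requirement: keep the owner and neighbour of one
// (arbitrarily chosen, assumed internal) face on the same processor.
//
//     boolList blockedFace(mesh.nFaces(), true);
//     blockedFace[100] = false;        // false => owner and neighbour stay together
//
//     labelList cellToProc
//     (
//         decomposer->decompose
//         (
//             mesh,
//             scalarField(),           // no cell weights
//             blockedFace,
//             PtrList<labelList>(),    // no processor face sets
//             labelList(),
//             List<labelPair>()        // no explicit connections
//         )
//     );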
1007 
1008 void Foam::decompositionMethod::setConstraints
1009 (
1010  const polyMesh& mesh,
1011  boolList& blockedFace,
1012  PtrList<labelList>& specifiedProcessorFaces,
1013  labelList& specifiedProcessor,
1014  List<labelPair>& explicitConnections
1015 )
1016 {
1017  blockedFace.setSize(mesh.nFaces());
1018  blockedFace = true;
1019 
1020  specifiedProcessorFaces.clear();
1021  explicitConnections.clear();
1022 
1023  forAll(constraints_, constraintI)
1024  {
1025  constraints_[constraintI].add
1026  (
1027  mesh,
1028  blockedFace,
1029  specifiedProcessorFaces,
1030  specifiedProcessor,
1031  explicitConnections
1032  );
1033  }
1034 }
1035 
1036 
1037 void Foam::decompositionMethod::applyConstraints
1038 (
1039  const polyMesh& mesh,
1040  const boolList& blockedFace,
1041  const PtrList<labelList>& specifiedProcessorFaces,
1042  const labelList& specifiedProcessor,
1043  const List<labelPair>& explicitConnections,
1044  labelList& decomposition
1045 )
1046 {
1047  forAll(constraints_, constraintI)
1048  {
1049  constraints_[constraintI].apply
1050  (
1051  mesh,
1052  blockedFace,
1053  specifiedProcessorFaces,
1054  specifiedProcessor,
1055  explicitConnections,
1056  decomposition
1057  );
1058  }
1059 }
1060 
1061 
1062 Foam::labelList Foam::decompositionMethod::decompose
1063 (
1064  const polyMesh& mesh,
1065  const scalarField& cellWeights
1066 )
1067 {
1068  // Collect all constraints
1069 
1070  boolList blockedFace;
1071  PtrList<labelList> specifiedProcessorFaces;
1072  labelList specifiedProcessor;
1073  List<labelPair> explicitConnections;
1074  setConstraints
1075  (
1076  mesh,
1077  blockedFace,
1078  specifiedProcessorFaces,
1079  specifiedProcessor,
1080  explicitConnections
1081  );
1082 
1083 
1084  // Construct decomposition method and either do decomposition on
1085  // cell centres or on agglomeration
1086 
1087  labelList finalDecomp = decompose
1088  (
1089  mesh,
1090  cellWeights, // optional weights
1091  blockedFace, // any cells to be combined
1092  specifiedProcessorFaces,// any whole cluster of cells to be kept
1093  specifiedProcessor,
1094  explicitConnections // baffles
1095  );
1096 
1097 
1098  // Give any constraint the option of modifying the decomposition
1099 
1100  applyConstraints
1101  (
1102  mesh,
1103  blockedFace,
1104  specifiedProcessorFaces,
1105  specifiedProcessor,
1106  explicitConnections,
1107  finalDecomp
1108  );
1109 
1110  return finalDecomp;
1111 }
1112 
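// A minimal sketch of the top-level entry point above, as a
// decomposePar-style utility might call it (decomposer and mesh assumed to
// exist; an empty weight field selects the unweighted path):
//
//     labelList cellToProc
//     (
//         decomposer->decompose(mesh, scalarField())
//     );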
1113 
1114 // ************************************************************************* //