backgroundMeshDecomposition.C
1 /*---------------------------------------------------------------------------*\
2  ========= |
3  \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4  \\ / O peration |
5  \\ / A nd | Copyright (C) 2011-2015 OpenFOAM Foundation
6  \\/ M anipulation |
7 -------------------------------------------------------------------------------
8 License
9  This file is part of OpenFOAM.
10 
11  OpenFOAM is free software: you can redistribute it and/or modify it
12  under the terms of the GNU General Public License as published by
13  the Free Software Foundation, either version 3 of the License, or
14  (at your option) any later version.
15 
16  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19  for more details.
20 
21  You should have received a copy of the GNU General Public License
22  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 
24 \*---------------------------------------------------------------------------*/
25 
26 #include "backgroundMeshDecomposition.H"
27 #include "meshSearch.H"
28 #include "conformationSurfaces.H"
30 #include "Time.H"
31 #include "Random.H"
32 #include "pointConversion.H"
33 
34 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
35 
36 namespace Foam
37 {
38 
39 defineTypeNameAndDebug(backgroundMeshDecomposition, 0);
40 
41 }
42 
43 
44 // * * * * * * * * * * * * * Static Member Functions * * * * * * * * * * * * //
45 
46 Foam::autoPtr<Foam::mapDistribute> Foam::backgroundMeshDecomposition::buildMap
47 (
48  const List<label>& toProc
49 )
50 {
51  // Determine send map
52  // ~~~~~~~~~~~~~~~~~~
53 
54  // 1. Count
55  labelList nSend(Pstream::nProcs(), 0);
56 
57  forAll(toProc, i)
58  {
59  label procI = toProc[i];
60 
61  nSend[procI]++;
62  }
63 
64  // Send over how many I need to receive
65  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
66 
67  labelListList sendSizes(Pstream::nProcs());
68 
69  sendSizes[Pstream::myProcNo()] = nSend;
70 
71  combineReduce(sendSizes, UPstream::listEq());
72 
73  // 2. Size sendMap
74  labelListList sendMap(Pstream::nProcs());
75 
76  forAll(nSend, procI)
77  {
78  sendMap[procI].setSize(nSend[procI]);
79 
80  nSend[procI] = 0;
81  }
82 
83  // 3. Fill sendMap
84  forAll(toProc, i)
85  {
86  label procI = toProc[i];
87 
88  sendMap[procI][nSend[procI]++] = i;
89  }
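 // Example (hypothetical data): with three processors and, on processor 0,
 // toProc = (1 0 1 2), the loops above produce
 //     sendMap = ((1) (0 2) (3))
 // i.e. the local element indices grouped by their destination processor.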
90 
91  // Determine receive map
92  // ~~~~~~~~~~~~~~~~~~~~~
93 
94  labelListList constructMap(Pstream::nProcs());
95 
96  // Local transfers first
97  constructMap[Pstream::myProcNo()] = identity
98  (
99  sendMap[Pstream::myProcNo()].size()
100  );
101 
102  label constructSize = constructMap[Pstream::myProcNo()].size();
103 
104  forAll(constructMap, procI)
105  {
106  if (procI != Pstream::myProcNo())
107  {
108  label nRecv = sendSizes[procI][Pstream::myProcNo()];
109 
110  constructMap[procI].setSize(nRecv);
111 
112  for (label i = 0; i < nRecv; i++)
113  {
114  constructMap[procI][i] = constructSize++;
115  }
116  }
117  }
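 // The construct (receive) map lists, per originating processor, where the
 // incoming elements are placed in the distributed list: the local elements
 // occupy the first slots, remote contributions are appended in processor
 // order, and constructSize ends up as the total length of that list.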
118 
119  return autoPtr<mapDistribute>
120  (
121  new mapDistribute
122  (
123  constructSize,
124  sendMap.xfer(),
125  constructMap.xfer()
126  )
127  );
128 }
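// A map built this way is used later in this file (see processorNearestPosition
// and intersectsProcessors): the caller distributes per-element data to the
// candidate processors, operates on it locally, and gathers the results back.
// Minimal sketch, assuming a List<point> 'data' ordered like 'toProc':
//
//     const label origSize = data.size();
//     autoPtr<mapDistribute> map(buildMap(toProc));
//     map().distribute(data);                     // send to destinations
//     // ... evaluate something for each received element ...
//     map().reverseDistribute(origSize, data);    // return results to senders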
129 
130 
131 // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
132 
133 void Foam::backgroundMeshDecomposition::initialRefinement()
134 {
135  volScalarField cellWeights
136  (
137  IOobject
138  (
139  "cellWeights",
140  mesh_.time().timeName(),
141  mesh_,
142  IOobject::NO_READ,
143  IOobject::AUTO_WRITE
144  ),
145  mesh_,
146  dimensionedScalar("one", dimless, 1.0),
147  zeroGradientFvPatchScalarField::typeName
148  );
149 
150  const conformationSurfaces& geometry = geometryToConformTo_;
151 
152  decompositionMethod& decomposer = decomposerPtr_();
153 
154  volScalarField::InternalField& icellWeights = cellWeights.internalField();
155 
156 
157  // For each cell in the mesh, record whether it has been determined to
158  // be fully inside, outside, or overlapping the surface
159  List<volumeType> volumeStatus
160  (
161  mesh_.nCells(),
162  volumeType::UNKNOWN
163  );
164 
165  // Surface refinement
166  {
167  while (true)
168  {
169  // Determine/update the status of each cell
170  forAll(volumeStatus, cellI)
171  {
172  if (volumeStatus[cellI] == volumeType::UNKNOWN)
173  {
174  treeBoundBox cellBb
175  (
176  mesh_.cells()[cellI].points
177  (
178  mesh_.faces(),
179  mesh_.points()
180  )
181  );
182 
183  if (geometry.overlaps(cellBb))
184  {
185  volumeStatus[cellI] = volumeType::MIXED;
186  }
187  else if (geometry.inside(cellBb.midpoint()))
188  {
189  volumeStatus[cellI] = volumeType::INSIDE;
190  }
191  else
192  {
193  volumeStatus[cellI] = volumeType::OUTSIDE;
194  }
195  }
196  }
197 
198  {
199  labelList refCells = selectRefinementCells
200  (
201  volumeStatus,
202  cellWeights
203  );
204 
205  // Maintain 2:1 ratio
206  labelList newCellsToRefine
207  (
208  meshCutter_.consistentRefinement
209  (
210  refCells,
211  true // extend set
212  )
213  );
214 
215  forAll(newCellsToRefine, nCTRI)
216  {
217  label cellI = newCellsToRefine[nCTRI];
218 
219  if (volumeStatus[cellI] == volumeType::MIXED)
220  {
221  volumeStatus[cellI] = volumeType::UNKNOWN;
222  }
223 
224  icellWeights[cellI] = max
225  (
226  1.0,
227  icellWeights[cellI]/8.0
228  );
229  }
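 // The parent weight is divided by 8 in anticipation of the 2x2x2 split
 // performed by the mesh cutter, so the estimated load is shared between
 // the prospective child cells; the max with 1.0 keeps every cell at a
 // weight of at least one.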
230 
231  if (returnReduce(newCellsToRefine.size(), sumOp<label>()) == 0)
232  {
233  break;
234  }
235 
236  // Mesh changing engine.
237  polyTopoChange meshMod(mesh_);
238 
239  // Play refinement commands into mesh changer.
240  meshCutter_.setRefinement(newCellsToRefine, meshMod);
241 
242  // Create mesh, return map from old to new mesh.
243  autoPtr<mapPolyMesh> map = meshMod.changeMesh
244  (
245  mesh_,
246  false, // inflate
247  true, // syncParallel
248  true, // orderCells (to reduce cell transfers)
249  false // orderPoints
250  );
251 
252  // Update fields
253  mesh_.updateMesh(map);
254 
255  // Update numbering of cells/vertices.
256  meshCutter_.updateMesh(map);
257 
258  {
259  // Map volumeStatus
260 
261  const labelList& cellMap = map().cellMap();
262 
263  List<volumeType> newVolumeStatus(cellMap.size());
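 // A cellMap entry of -1 marks a new cell with no originating (master)
 // cell in the old mesh; such cells are reset to UNKNOWN and re-evaluated
 // on the next pass, while all others inherit the status of the cell they
 // came from.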
264 
265  forAll(cellMap, newCellI)
266  {
267  label oldCellI = cellMap[newCellI];
268 
269  if (oldCellI == -1)
270  {
271  newVolumeStatus[newCellI] = volumeType::UNKNOWN;
272  }
273  else
274  {
275  newVolumeStatus[newCellI] = volumeStatus[oldCellI];
276  }
277  }
278 
279  volumeStatus.transfer(newVolumeStatus);
280  }
281 
282  Info<< " Background mesh refined from "
283  << returnReduce(map().nOldCells(), sumOp<label>())
284  << " to " << mesh_.globalData().nTotalCells()
285  << " cells." << endl;
286  }
287 
288  // Determine/update the status of each cell
289  forAll(volumeStatus, cellI)
290  {
291  if (volumeStatus[cellI] == volumeType::UNKNOWN)
292  {
293  treeBoundBox cellBb
294  (
295  mesh_.cells()[cellI].points
296  (
297  mesh_.faces(),
298  mesh_.points()
299  )
300  );
301 
302  if (geometry.overlaps(cellBb))
303  {
304  volumeStatus[cellI] = volumeType::MIXED;
305  }
306  else if (geometry.inside(cellBb.midpoint()))
307  {
308  volumeStatus[cellI] = volumeType::INSIDE;
309  }
310  else
311  {
312  volumeStatus[cellI] = volumeType::OUTSIDE;
313  }
314  }
315  }
316 
317  // Hard code switch for this stage for testing
318  bool removeOutsideCells = false;
319 
320  if (removeOutsideCells)
321  {
322  DynamicList<label> cellsToRemove;
323 
324  forAll(volumeStatus, cellI)
325  {
326  if (volumeStatus[cellI] == volumeType::OUTSIDE)
327  {
328  cellsToRemove.append(cellI);
329  }
330  }
331 
332  removeCells cellRemover(mesh_);
333 
334  // Mesh changing engine.
335  polyTopoChange meshMod(mesh_);
336 
337  labelList exposedFaces = cellRemover.getExposedFaces
338  (
339  cellsToRemove
340  );
341 
342  // Play refinement commands into mesh changer.
343  cellRemover.setRefinement
344  (
345  cellsToRemove,
346  exposedFaces,
347  labelList(exposedFaces.size(), 0), // patchID dummy
348  meshMod
349  );
350 
351  // Create mesh, return map from old to new mesh.
352  autoPtr<mapPolyMesh> map = meshMod.changeMesh
353  (
354  mesh_,
355  false, // inflate
356  true, // syncParallel
357  true, // orderCells (to reduce cell transfers)
358  false // orderPoints
359  );
360 
361  // Update fields
362  mesh_.updateMesh(map);
363 
364  // Update numbering of cells/vertices.
365  meshCutter_.updateMesh(map);
366  cellRemover.updateMesh(map);
367 
368  {
369  // Map volumeStatus
370 
371  const labelList& cellMap = map().cellMap();
372 
373  List<volumeType> newVolumeStatus(cellMap.size());
374 
375  forAll(cellMap, newCellI)
376  {
377  label oldCellI = cellMap[newCellI];
378 
379  if (oldCellI == -1)
380  {
381  newVolumeStatus[newCellI] = volumeType::UNKNOWN;
382  }
383  else
384  {
385  newVolumeStatus[newCellI] = volumeStatus[oldCellI];
386  }
387  }
388 
389  volumeStatus.transfer(newVolumeStatus);
390  }
391 
392  Info<< "Removed "
393  << returnReduce(map().nOldCells(), sumOp<label>())
394  - mesh_.globalData().nTotalCells()
395  << " cells." << endl;
396  }
397 
398  if (debug)
399  {
400  // const_cast<Time&>(mesh_.time())++;
401  // Info<< "Time " << mesh_.time().timeName() << endl;
402  meshCutter_.write();
403  mesh_.write();
404  cellWeights.write();
405  }
406 
407  labelList newDecomp = decomposer.decompose
408  (
409  mesh_,
410  mesh_.cellCentres(),
411  icellWeights
412  );
413 
414  fvMeshDistribute distributor(mesh_, mergeDist_);
415 
416  autoPtr<mapDistributePolyMesh> mapDist = distributor.distribute
417  (
418  newDecomp
419  );
420 
421  meshCutter_.distribute(mapDist);
422 
423  mapDist().distributeCellData(volumeStatus);
424 
425  if (debug)
426  {
427  printMeshData(mesh_);
428 
429  // const_cast<Time&>(mesh_.time())++;
430  // Info<< "Time " << mesh_.time().timeName() << endl;
431  meshCutter_.write();
432  mesh_.write();
433  cellWeights.write();
434  }
435  }
436  }
437 
438  if (debug)
439  {
440  // const_cast<Time&>(mesh_.time())++;
441  // Info<< "Time " << mesh_.time().timeName() << endl;
442  cellWeights.write();
443  mesh_.write();
444  }
445 
446  buildPatchAndTree();
447 }
448 
449 
450 void Foam::backgroundMeshDecomposition::printMeshData
451 (
452  const polyMesh& mesh
453 ) const
454 {
455  // Collect all data on master
456 
457  globalIndex globalCells(mesh.nCells());
458  // labelListList patchNeiProcNo(Pstream::nProcs());
459  // labelListList patchSize(Pstream::nProcs());
460  // const labelList& pPatches = mesh.globalData().processorPatches();
461  // patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size());
462  // patchSize[Pstream::myProcNo()].setSize(pPatches.size());
463  // forAll(pPatches, i)
464  // {
465  // const processorPolyPatch& ppp = refCast<const processorPolyPatch>
466  // (
467  // mesh.boundaryMesh()[pPatches[i]]
468  // );
469  // patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo();
470  // patchSize[Pstream::myProcNo()][i] = ppp.size();
471  // }
472  // Pstream::gatherList(patchNeiProcNo);
473  // Pstream::gatherList(patchSize);
474 
475 
476  // // Print stats
477 
478  // globalIndex globalBoundaryFaces(mesh.nFaces()-mesh.nInternalFaces());
479 
480  for (label procI = 0; procI < Pstream::nProcs(); procI++)
481  {
482  Info<< "Processor " << procI << " "
483  << "Number of cells = " << globalCells.localSize(procI)
484  << endl;
485 
486  // label nProcFaces = 0;
487 
488  // const labelList& nei = patchNeiProcNo[procI];
489 
490  // forAll(patchNeiProcNo[procI], i)
491  // {
492  // Info<< " Number of faces shared with processor "
493  // << patchNeiProcNo[procI][i] << " = " << patchSize[procI][i]
494  // << endl;
495 
496  // nProcFaces += patchSize[procI][i];
497  // }
498 
499  // Info<< " Number of processor patches = " << nei.size() << nl
500  // << " Number of processor faces = " << nProcFaces << nl
501  // << " Number of boundary faces = "
502  // << globalBoundaryFaces.localSize(procI) << endl;
503  }
504 }
505 
506 
507 bool Foam::backgroundMeshDecomposition::refineCell
508 (
509  label cellI,
510  volumeType volType,
511  scalar& weightEstimate
512 ) const
513 {
514  // Sample the box to estimate the minimum surface cell size and, for
515  // cells that overlap the surface, a volume-based weight estimate.
516 
517 // const conformationSurfaces& geometry = geometryToConformTo_;
518 
519  treeBoundBox cellBb
520  (
521  mesh_.cells()[cellI].points
522  (
523  mesh_.faces(),
524  mesh_.points()
525  )
526  );
527 
528  weightEstimate = 1.0;
529 
530  if (volType == volumeType::MIXED)
531  {
532 // // Assess the cell size at the nearest point on the surface for the
533 // // MIXED cells, if the cell is large with respect to the cell size,
534 // // then refine it.
535 //
536 // pointField samplePoints
537 // (
538 // volRes_*volRes_*volRes_,
539 // vector::zero
540 // );
541 //
542 // // scalar sampleVol = cellBb.volume()/samplePoints.size();
543 //
544 // vector delta = cellBb.span()/volRes_;
545 //
546 // label pI = 0;
547 //
548 // for (label i = 0; i < volRes_; i++)
549 // {
550 // for (label j = 0; j < volRes_; j++)
551 // {
552 // for (label k = 0; k < volRes_; k++)
553 // {
554 // samplePoints[pI++] =
555 // cellBb.min()
556 // + vector
557 // (
558 // delta.x()*(i + 0.5),
559 // delta.y()*(j + 0.5),
560 // delta.z()*(k + 0.5)
561 // );
562 // }
563 // }
564 // }
565 //
566 // List<pointIndexHit> hitInfo;
567 // labelList hitSurfaces;
568 //
569 // geometry.findSurfaceNearest
570 // (
571 // samplePoints,
572 // scalarField(samplePoints.size(), sqr(GREAT)),
573 // hitInfo,
574 // hitSurfaces
575 // );
576 //
577 // // weightEstimate = 0.0;
578 //
579 // scalar minCellSize = GREAT;
580 //
581 // forAll(samplePoints, i)
582 // {
583 // scalar s = cellShapeControl_.cellSize
584 // (
585 // hitInfo[i].hitPoint()
586 // );
587 //
588 // // Info<< "cellBb.midpoint() " << cellBb.midpoint() << nl
589 // // << samplePoints[i] << nl
590 // // << hitInfo[i] << nl
591 // // << "cellBb.span() " << cellBb.span() << nl
592 // // << "cellBb.mag() " << cellBb.mag() << nl
593 // // << s << endl;
594 //
595 // if (s < minCellSize)
596 // {
597 // minCellSize = max(s, minCellSizeLimit_);
598 // }
599 //
600 // // Estimate the number of points in the cell by the surface size,
601 // // this is likely to be too small, so reduce.
602 // // weightEstimate += sampleVol/pow3(s);
603 // }
604 //
605 // if (sqr(spanScale_)*sqr(minCellSize) < magSqr(cellBb.span()))
606 // {
607 // return true;
608 // }
609  }
610  else if (volType == volumeType::INSIDE)
611  {
612  // scalar s =
613  // foamyHexMesh_.cellShapeControl_.cellSize(cellBb.midpoint());
614 
615  // Estimate the number of points in the cell by the size at the cell
616  // midpoint
617  // weightEstimate = cellBb.volume()/pow3(s);
618 
619  return false;
620  }
621  // else
622  // {
623  // weightEstimate = 1.0;
624 
625  // return false;
626  // }
627 
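 // With the sampling block above commented out, no MIXED cell is flagged
 // here and the weight estimate stays at 1.0; in this form the refinement
 // selection in selectRefinementCells is driven by the cellLevel/minLevels_
 // test alone.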
628  return false;
629 }
630 
631 
632 Foam::labelList Foam::backgroundMeshDecomposition::selectRefinementCells
633 (
634  List<volumeType>& volumeStatus,
635  volScalarField& cellWeights
636 ) const
637 {
638  volScalarField::InternalField& icellWeights = cellWeights.internalField();
639 
640  labelHashSet cellsToRefine;
641 
642  // Determine/update the status of each cell
643  forAll(volumeStatus, cellI)
644  {
645  if (volumeStatus[cellI] == volumeType::MIXED)
646  {
647  if (meshCutter_.cellLevel()[cellI] < minLevels_)
648  {
649  cellsToRefine.insert(cellI);
650  }
651  }
652 
653  if (volumeStatus[cellI] != volumeType::OUTSIDE)
654  {
655  if
656  (
657  refineCell
658  (
659  cellI,
660  volumeStatus[cellI],
661  icellWeights[cellI]
662  )
663  )
664  {
665  cellsToRefine.insert(cellI);
666  }
667  }
668  }
669 
670  return cellsToRefine.toc();
671 }
672 
673 
674 void Foam::backgroundMeshDecomposition::buildPatchAndTree()
675 {
676  primitivePatch tmpBoundaryFaces
677  (
678  SubList<face>
679  (
680  mesh_.faces(),
681  mesh_.nFaces() - mesh_.nInternalFaces(),
682  mesh_.nInternalFaces()
683  ),
684  mesh_.points()
685  );
686 
687  boundaryFacesPtr_.reset
688  (
689  new bPatch
690  (
691  tmpBoundaryFaces.localFaces(),
692  tmpBoundaryFaces.localPoints()
693  )
694  );
695 
696  // Overall bb
697  treeBoundBox overallBb(boundaryFacesPtr_().localPoints());
698 
699  Random& rnd = rndGen_;
700 
701  bFTreePtr_.reset
702  (
703  new indexedOctree<treeDataBPatch>
704  (
705  treeDataBPatch
706  (
707  false,
708  boundaryFacesPtr_(),
709  indexedOctree<treeDataBPatch>::perturbTol()
710  ),
711  overallBb.extend(rnd, 1e-4),
712  10, // maxLevel
713  10, // leafSize
714  3.0 // duplicity
715  )
716  );
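 // The octree over the local boundary faces is what answers all of the
 // point/box/line queries below; extend(rnd, 1e-4) inflates the bounding
 // box by a small random amount so that points lying exactly on the box
 // faces are handled robustly.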
717 
718  // Give the bounds of every processor to every other processor
719  allBackgroundMeshBounds_[Pstream::myProcNo()] = overallBb;
720 
721  Pstream::gatherList(allBackgroundMeshBounds_);
722  Pstream::scatterList(allBackgroundMeshBounds_);
723 
724  point bbMin(GREAT, GREAT, GREAT);
725  point bbMax(-GREAT, -GREAT, -GREAT);
726 
727  forAll(allBackgroundMeshBounds_, procI)
728  {
729  bbMin = min(bbMin, allBackgroundMeshBounds_[procI].min());
730  bbMax = max(bbMax, allBackgroundMeshBounds_[procI].max());
731  }
732 
733  globalBackgroundBounds_ = treeBoundBox(bbMin, bbMax);
734 
735  if (false)
736  {
737  OFstream fStr
738  (
739  mesh_.time().path()
740  /"backgroundMeshDecomposition_proc_"
742  + "_boundaryFaces.obj"
743  );
744 
745  const faceList& faces = boundaryFacesPtr_().localFaces();
746  const List<point>& points = boundaryFacesPtr_().localPoints();
747 
748  Map<label> foamToObj(points.size());
749 
750  label vertI = 0;
751 
752  forAll(faces, i)
753  {
754  const face& f = faces[i];
755 
756  forAll(f, fPI)
757  {
758  if (foamToObj.insert(f[fPI], vertI))
759  {
760  meshTools::writeOBJ(fStr, points[f[fPI]]);
761  vertI++;
762  }
763  }
764 
765  fStr<< 'f';
766 
767  forAll(f, fPI)
768  {
769  fStr<< ' ' << foamToObj[f[fPI]] + 1;
770  }
771 
772  fStr<< nl;
773  }
774  }
775 }
776 
777 
778 // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
779 
780 Foam::backgroundMeshDecomposition::backgroundMeshDecomposition
781 (
782  const Time& runTime,
783  Random& rndGen,
784  const conformationSurfaces& geometryToConformTo,
785  const dictionary& coeffsDict
786 )
787 :
788  runTime_(runTime),
789  geometryToConformTo_(geometryToConformTo),
790  rndGen_(rndGen),
791  mesh_
792  (
793  IOobject
794  (
795  "backgroundMeshDecomposition",
796  runTime_.timeName(),
797  runTime_,
798  IOobject::MUST_READ,
799  IOobject::AUTO_WRITE,
800  false
801  )
802  ),
803  meshCutter_
804  (
805  mesh_,
806  labelList(mesh_.nCells(), 0),
807  labelList(mesh_.nPoints(), 0)
808  ),
809  boundaryFacesPtr_(),
810  bFTreePtr_(),
811  allBackgroundMeshBounds_(Pstream::nProcs()),
812  globalBackgroundBounds_(),
813  decomposeDict_
814  (
815  IOobject
816  (
817  "decomposeParDict",
818  runTime_.system(),
819  runTime_,
820  IOobject::MUST_READ_IF_MODIFIED,
821  IOobject::NO_WRITE
822  )
823  ),
824  decomposerPtr_(decompositionMethod::New(decomposeDict_)),
825  mergeDist_(1e-6*mesh_.bounds().mag()),
826  spanScale_(readScalar(coeffsDict.lookup("spanScale"))),
827  minCellSizeLimit_
828  (
829  coeffsDict.lookupOrDefault<scalar>("minCellSizeLimit", 0.0)
830  ),
831  minLevels_(readLabel(coeffsDict.lookup("minLevels"))),
832  volRes_(readLabel(coeffsDict.lookup("sampleResolution"))),
833  maxCellWeightCoeff_(readScalar(coeffsDict.lookup("maxCellWeightCoeff")))
834 {
835  if (!Pstream::parRun())
836  {
837  FatalErrorIn
838  (
839  "Foam::backgroundMeshDecomposition::backgroundMeshDecomposition"
840  "("
841  "const dictionary& coeffsDict, "
842  "const conformalVoronoiMesh& foamyHexMesh"
843  ")"
844  )
845  << "This cannot be used when not running in parallel."
846  << exit(FatalError);
847  }
848 
849  if (!decomposerPtr_().parallelAware())
850  {
851  FatalErrorIn
852  (
853  "void Foam::backgroundMeshDecomposition::initialRefinement() const"
854  )
855  << "You have selected decomposition method "
856  << decomposerPtr_().typeName
857  << " which is not parallel aware." << endl
858  << exit(FatalError);
859  }
860 
861  Info<< nl << "Building initial background mesh decomposition" << endl;
862 
863  initialRefinement();
864 }
865 
866 
867 // * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
868 
869 Foam::backgroundMeshDecomposition::~backgroundMeshDecomposition()
870 {}
871 
872 
873 // * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
874 
875 Foam::autoPtr<Foam::mapDistributePolyMesh>
876 Foam::backgroundMeshDecomposition::distribute
877 (
878  volScalarField& cellWeights
879 )
880 {
881  if (debug)
882  {
883  // const_cast<Time&>(mesh_.time())++;
884  // Info<< "Time " << mesh_.time().timeName() << endl;
885  cellWeights.write();
886  mesh_.write();
887  }
888 
889  volScalarField::InternalField& icellWeights = cellWeights.internalField();
890 
891  while (true)
892  {
893  // Refine large cells if necessary
894 
895  label nOccupiedCells = 0;
896 
897  forAll(icellWeights, cI)
898  {
899  if (icellWeights[cI] > 1 - SMALL)
900  {
901  nOccupiedCells++;
902  }
903  }
904 
905  // Only look at occupied cells, as there is a possibility of runaway
906  // refinement if the number of cells grows too fast. Also, clip the
907  // minimum cellWeightLimit at maxCellWeightCoeff_
908 
909  scalar cellWeightLimit = max
910  (
911  maxCellWeightCoeff_
912  *sum(cellWeights).value()
913  /returnReduce(nOccupiedCells, sumOp<label>()),
914  maxCellWeightCoeff_
915  );
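 // The limit is maxCellWeightCoeff_ times sum(cellWeights) divided by the
 // number of occupied cells (weight > 1 - SMALL), clipped from below at
 // maxCellWeightCoeff_. Worked example with hypothetical numbers:
 // sum(cellWeights) = 1000 over 200 occupied cells and
 // maxCellWeightCoeff_ = 20 gives max(20*1000/200, 20) = 100, so any cell
 // whose weight exceeds 100 is selected for refinement below.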
916 
917  if (debug)
918  {
919  Info<< " cellWeightLimit " << cellWeightLimit << endl;
920 
921  Pout<< " sum(cellWeights) " << sum(cellWeights.internalField())
922  << " max(cellWeights) " << max(cellWeights.internalField())
923  << endl;
924  }
925 
926  labelHashSet cellsToRefine;
927 
928  forAll(icellWeights, cWI)
929  {
930  if (icellWeights[cWI] > cellWeightLimit)
931  {
932  cellsToRefine.insert(cWI);
933  }
934  }
935 
936  if (returnReduce(cellsToRefine.size(), sumOp<label>()) == 0)
937  {
938  break;
939  }
940 
941  // Maintain 2:1 ratio
942  labelList newCellsToRefine
943  (
944  meshCutter_.consistentRefinement
945  (
946  cellsToRefine.toc(),
947  true // extend set
948  )
949  );
950 
951  if (debug && !cellsToRefine.empty())
952  {
953  Pout<< " cellWeights too large in " << cellsToRefine.size()
954  << " cells" << endl;
955  }
956 
957  forAll(newCellsToRefine, nCTRI)
958  {
959  label cellI = newCellsToRefine[nCTRI];
960 
961  icellWeights[cellI] /= 8.0;
962  }
963 
964  // Mesh changing engine.
965  polyTopoChange meshMod(mesh_);
966 
967  // Play refinement commands into mesh changer.
968  meshCutter_.setRefinement(newCellsToRefine, meshMod);
969 
970  // Create mesh, return map from old to new mesh.
971  autoPtr<mapPolyMesh> map = meshMod.changeMesh
972  (
973  mesh_,
974  false, // inflate
975  true, // syncParallel
976  true, // orderCells (to reduce cell motion)
977  false // orderPoints
978  );
979 
980  // Update fields
981  mesh_.updateMesh(map);
982 
983  // Update numbering of cells/vertices.
984  meshCutter_.updateMesh(map);
985 
986  Info<< " Background mesh refined from "
987  << returnReduce(map().nOldCells(), sumOp<label>())
988  << " to " << mesh_.globalData().nTotalCells()
989  << " cells." << endl;
990 
991  if (debug)
992  {
993  // const_cast<Time&>(mesh_.time())++;
994  // Info<< "Time " << mesh_.time().timeName() << endl;
995  cellWeights.write();
996  mesh_.write();
997  }
998  }
999 
1000  if (debug)
1001  {
1002  printMeshData(mesh_);
1003 
1004  Pout<< " Pre distribute sum(cellWeights) "
1005  << sum(icellWeights)
1006  << " max(cellWeights) "
1007  << max(icellWeights)
1008  << endl;
1009  }
1010 
1011  labelList newDecomp = decomposerPtr_().decompose
1012  (
1013  mesh_,
1014  mesh_.cellCentres(),
1015  icellWeights
1016  );
1017 
1018  Info<< " Redistributing background mesh cells" << endl;
1019 
1020  fvMeshDistribute distributor(mesh_, mergeDist_);
1021 
1022  autoPtr<mapDistributePolyMesh> mapDist = distributor.distribute(newDecomp);
1023 
1024  meshCutter_.distribute(mapDist);
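 // The same distribution map is applied to the refinement history held by
 // the mesh cutter, and is returned to the caller so that any other
 // cell-based data can be redistributed the same way.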
1025 
1026  if (debug)
1027  {
1028  printMeshData(mesh_);
1029 
1030  Pout<< " Post distribute sum(cellWeights) "
1031  << sum(icellWeights)
1032  << " max(cellWeights) "
1033  << max(icellWeights)
1034  << endl;
1035 
1036  // const_cast<Time&>(mesh_.time())++;
1037  // Info<< "Time " << mesh_.time().timeName() << endl;
1038  mesh_.write();
1039  cellWeights.write();
1040  }
1041 
1042  buildPatchAndTree();
1043 
1044  return mapDist;
1045 }
1046 
1047 
1048 bool Foam::backgroundMeshDecomposition::positionOnThisProcessor
1049 (
1050  const point& pt
1051 ) const
1052 {
1053 // return bFTreePtr_().findAnyOverlap(pt, 0.0);
1054 
1055  return (bFTreePtr_().getVolumeType(pt) == volumeType::INSIDE);
1056 }
1057 
1058 
1059 Foam::boolList Foam::backgroundMeshDecomposition::positionOnThisProcessor
1060 (
1061  const List<point>& pts
1062 ) const
1063 {
1064  boolList posProc(pts.size(), true);
1065 
1066  forAll(pts, pI)
1067  {
1068  posProc[pI] = positionOnThisProcessor(pts[pI]);
1069  }
1070 
1071  return posProc;
1072 }
1073 
1074 
1075 bool Foam::backgroundMeshDecomposition::overlapsThisProcessor
1076 (
1077  const treeBoundBox& box
1078 ) const
1079 {
1080 // return !procBounds().contains(box);
1081  return !bFTreePtr_().findBox(box).empty();
1082 }
1083 
1084 
1085 bool Foam::backgroundMeshDecomposition::overlapsThisProcessor
1086 (
1087  const point& centre,
1088  const scalar radiusSqr
1089 ) const
1090 {
1091  //return bFTreePtr_().findAnyOverlap(centre, radiusSqr);
1092 
1093  return bFTreePtr_().findNearest(centre, radiusSqr).hit();
1094 }
1095 
1096 
1097 Foam::pointIndexHit Foam::backgroundMeshDecomposition::findLine
1098 (
1099  const point& start,
1100  const point& end
1101 ) const
1102 {
1103  return bFTreePtr_().findLine(start, end);
1104 }
1105 
1106 
1107 Foam::pointIndexHit Foam::backgroundMeshDecomposition::findLineAny
1108 (
1109  const point& start,
1110  const point& end
1111 ) const
1112 {
1113  return bFTreePtr_().findLineAny(start, end);
1114 }
1115 
1116 
1117 Foam::labelList Foam::backgroundMeshDecomposition::processorNearestPosition
1118 (
1119  const List<point>& pts
1120 ) const
1121 {
1122  DynamicList<label> toCandidateProc;
1123  DynamicList<point> testPoints;
1124  labelList ptBlockStart(pts.size(), -1);
1125  labelList ptBlockSize(pts.size(), -1);
1126 
1127  label nTotalCandidates = 0;
1128 
1129  forAll(pts, pI)
1130  {
1131  const point& pt = pts[pI];
1132 
1133  label nCandidates = 0;
1134 
1135  forAll(allBackgroundMeshBounds_, procI)
1136  {
1137  // Candidate points may lie just outside a processor box, increase
1138  // test range by using overlaps rather than contains
1139  if (allBackgroundMeshBounds_[procI].overlaps(pt, sqr(SMALL*100)))
1140  {
1141  toCandidateProc.append(procI);
1142  testPoints.append(pt);
1143 
1144  nCandidates++;
1145  }
1146  }
1147 
1148  ptBlockStart[pI] = nTotalCandidates;
1149  ptBlockSize[pI] = nCandidates;
1150 
1151  nTotalCandidates += nCandidates;
1152  }
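 // Each query point may have several candidate processors; the block
 // start/size bookkeeping above records which slice of the flat candidate
 // list belongs to which point, so that the per-point results can be
 // recovered after the reverseDistribute further down.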
1153 
1154  // Needed for reverseDistribute
1155  label preDistributionToCandidateProcSize = toCandidateProc.size();
1156 
1157  autoPtr<mapDistribute> map(buildMap(toCandidateProc));
1158 
1159  map().distribute(testPoints);
1160 
1161  List<scalar> distanceSqrToCandidate(testPoints.size(), sqr(GREAT));
1162 
1163  // Test candidate points on candidate processors
1164  forAll(testPoints, tPI)
1165  {
1166  pointIndexHit info = bFTreePtr_().findNearest
1167  (
1168  testPoints[tPI],
1169  sqr(GREAT)
1170  );
1171 
1172  if (info.hit())
1173  {
1174  distanceSqrToCandidate[tPI] = magSqr
1175  (
1176  testPoints[tPI] - info.hitPoint()
1177  );
1178  }
1179  }
1180 
1181  map().reverseDistribute
1182  (
1183  preDistributionToCandidateProcSize,
1184  distanceSqrToCandidate
1185  );
1186 
1187  labelList ptNearestProc(pts.size(), -1);
1188 
1189  forAll(pts, pI)
1190  {
1191  // Extract the sub list of results for this point
1192 
1193  SubList<scalar> ptNearestProcResults
1194  (
1195  distanceSqrToCandidate,
1196  ptBlockSize[pI],
1197  ptBlockStart[pI]
1198  );
1199 
1200  scalar nearestProcDistSqr = GREAT;
1201 
1202  forAll(ptNearestProcResults, pPRI)
1203  {
1204  if (ptNearestProcResults[pPRI] < nearestProcDistSqr)
1205  {
1206  nearestProcDistSqr = ptNearestProcResults[pPRI];
1207 
1208  ptNearestProc[pI] = toCandidateProc[ptBlockStart[pI] + pPRI];
1209  }
1210  }
1211 
1212  if (debug)
1213  {
1214  Pout<< pts[pI] << " nearestProcDistSqr " << nearestProcDistSqr
1215  << " ptNearestProc[pI] " << ptNearestProc[pI] << endl;
1216  }
1217 
1218  if (ptNearestProc[pI] < 0)
1219  {
1220  FatalErrorIn
1221  (
1222  "Foam::labelList"
1223  "Foam::backgroundMeshDecomposition::processorNearestPosition"
1224  "("
1225  "const List<point>& pts"
1226  ") const"
1227  )
1228  << "The position " << pts[pI]
1229  << " did not find a nearest point on the background mesh."
1230  << exit(FatalError);
1231  }
1232  }
1233 
1234  return ptNearestProc;
1235 }
1236 
1237 
1238 
1239 Foam::List<Foam::List<Foam::pointIndexHit> >
1240 Foam::backgroundMeshDecomposition::intersectsProcessors
1241 (
1242  const List<point>& starts,
1243  const List<point>& ends,
1244  bool includeOwnProcessor
1245 ) const
1246 {
1247  DynamicList<label> toCandidateProc;
1248  DynamicList<point> testStarts;
1249  DynamicList<point> testEnds;
1250  labelList segmentBlockStart(starts.size(), -1);
1251  labelList segmentBlockSize(starts.size(), -1);
1252 
1253  label nTotalCandidates = 0;
1254 
1255  forAll(starts, sI)
1256  {
1257  const point& s = starts[sI];
1258  const point& e = ends[sI];
1259 
1260  // Dummy point for treeBoundBox::intersects
1261  point p(vector::zero);
1262 
1263  label nCandidates = 0;
1264 
1265  forAll(allBackgroundMeshBounds_, procI)
1266  {
1267  // It is assumed that the segment in question intersects the source
1268  // processor, so don't test it, unless includeOwnProcessor is true
1269  if
1270  (
1271  (includeOwnProcessor || procI != Pstream::myProcNo())
1272  && allBackgroundMeshBounds_[procI].intersects(s, e, p)
1273  )
1274  {
1275  toCandidateProc.append(procI);
1276  testStarts.append(s);
1277  testEnds.append(e);
1278 
1279  nCandidates++;
1280  }
1281  }
1282 
1283  segmentBlockStart[sI] = nTotalCandidates;
1284  segmentBlockSize[sI] = nCandidates;
1285 
1286  nTotalCandidates += nCandidates;
1287  }
1288 
1289  // Needed for reverseDistribute
1290  label preDistributionToCandidateProcSize = toCandidateProc.size();
1291 
1292  autoPtr<mapDistribute> map(buildMap(toCandidateProc));
1293 
1294  map().distribute(testStarts);
1295  map().distribute(testEnds);
1296 
1297  List<pointIndexHit> segmentIntersectsCandidate(testStarts.size());
1298 
1299  // Test candidate segments on candidate processors
1300  forAll(testStarts, sI)
1301  {
1302  const point& s = testStarts[sI];
1303  const point& e = testEnds[sI];
1304 
1305  // Record the first intersection (if any) of the segment with this processor's boundary patch
1306  segmentIntersectsCandidate[sI] = bFTreePtr_().findLine(s, e);
1307  }
1308 
1309  map().reverseDistribute
1310  (
1311  preDistributionToCandidateProcSize,
1312  segmentIntersectsCandidate
1313  );
1314 
1315  List<List<pointIndexHit> > segmentHitProcs(starts.size());
1316 
1317  // Working storage for assessing processors
1318  DynamicList<pointIndexHit> tmpProcHits;
1319 
1320  forAll(starts, sI)
1321  {
1322  tmpProcHits.clear();
1323 
1324  // Extract the sub list of results for this point
1325 
1326  SubList<pointIndexHit> segmentProcResults
1327  (
1328  segmentIntersectsCandidate,
1329  segmentBlockSize[sI],
1330  segmentBlockStart[sI]
1331  );
1332 
1333  forAll(segmentProcResults, sPRI)
1334  {
1335  if (segmentProcResults[sPRI].hit())
1336  {
1337  tmpProcHits.append(segmentProcResults[sPRI]);
1338 
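 // The hit index returned by the remote octree refers to a face label that
 // is only meaningful on that processor; overwrite it with the processor
 // number so the caller can tell which processor each intersection lies on.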
1339  tmpProcHits.last().setIndex
1340  (
1341  toCandidateProc[segmentBlockStart[sI] + sPRI]
1342  );
1343  }
1344  }
1345 
1346  segmentHitProcs[sI] = tmpProcHits;
1347  }
1348 
1349  return segmentHitProcs;
1350 }
1351 
1352 
1353 bool Foam::backgroundMeshDecomposition::overlapsOtherProcessors
1354 (
1355  const point& centre,
1356  const scalar& radiusSqr
1357 ) const
1358 {
1359  forAll(allBackgroundMeshBounds_, procI)
1360  {
1361  if (bFTreePtr_().findNearest(centre, radiusSqr).hit())
1362  {
1363  return true;
1364  }
1365  }
1366 
1367  return false;
1368 }
1369 
1370 
1371 Foam::labelList Foam::backgroundMeshDecomposition::overlapProcessors
1372 (
1373  const point& centre,
1374  const scalar radiusSqr
1375 ) const
1376 {
1377  DynamicList<label> toProc(Pstream::nProcs());
1378 
1379  forAll(allBackgroundMeshBounds_, procI)
1380  {
1381  // Test against the bounding box of the processor
1382  if
1383  (
1384  procI != Pstream::myProcNo()
1385  && allBackgroundMeshBounds_[procI].overlaps(centre, radiusSqr)
1386  )
1387  {
1388  // Expensive test
1389 // if (bFTreePtr_().findNearest(centre, radiusSqr).hit())
1390  {
1391  toProc.append(procI);
1392  }
1393  }
1394  }
1395 
1396  return toProc;
1397 }
1398 
1399 
1400 //Foam::labelListList Foam::backgroundMeshDecomposition::overlapsProcessors
1401 //(
1402 // const List<point>& centres,
1403 // const List<scalar>& radiusSqrs,
1404 // const Delaunay& T,
1405 // bool includeOwnProcessor
1406 //) const
1407 //{
1408 // DynamicList<label> toCandidateProc;
1409 // DynamicList<point> testCentres;
1410 // DynamicList<scalar> testRadiusSqrs;
1411 // labelList sphereBlockStart(centres.size(), -1);
1412 // labelList sphereBlockSize(centres.size(), -1);
1413 //
1414 // label nTotalCandidates = 0;
1415 //
1416 // forAll(centres, sI)
1417 // {
1418 // const point& c = centres[sI];
1419 // scalar rSqr = radiusSqrs[sI];
1420 //
1421 // label nCandidates = 0;
1422 //
1423 // forAll(allBackgroundMeshBounds_, procI)
1424 // {
1425 // // It is assumed that the sphere in question overlaps the source
1426 // // processor, so don't test it, unless includeOwnProcessor is true
1427 // if
1428 // (
1429 // (includeOwnProcessor || procI != Pstream::myProcNo())
1430 // && allBackgroundMeshBounds_[procI].overlaps(c, rSqr)
1431 // )
1432 // {
1433 // if (bFTreePtr_().findNearest(c, rSqr).hit())
1434 // {
1435 // toCandidateProc.append(procI);
1436 // testCentres.append(c);
1437 // testRadiusSqrs.append(rSqr);
1438 //
1439 // nCandidates++;
1440 // }
1441 // }
1442 // }
1443 //
1444 // sphereBlockStart[sI] = nTotalCandidates;
1445 // sphereBlockSize[sI] = nCandidates;
1446 //
1447 // nTotalCandidates += nCandidates;
1448 // }
1449 //
1450 // // Needed for reverseDistribute
1457 //
1458 // // @todo This is faster, but results in more vertices being referred
1459 // boolList sphereOverlapsCandidate(testCentres.size(), true);
1496 //
1502 //
1503 // labelListList sphereProcs(centres.size());
1504 //
1505 // // Working storage for assessing processors
1506 // DynamicList<label> tmpProcs;
1507 //
1508 // forAll(centres, sI)
1509 // {
1510 // tmpProcs.clear();
1511 //
1512 // // Extract the sub list of results for this point
1513 //
1514 // SubList<bool> sphereProcResults
1515 // (
1516 // sphereOverlapsCandidate,
1517 // sphereBlockSize[sI],
1518 // sphereBlockStart[sI]
1519 // );
1520 //
1521 // forAll(sphereProcResults, sPRI)
1522 // {
1523 // if (sphereProcResults[sPRI])
1524 // {
1525 // tmpProcs.append(toCandidateProc[sphereBlockStart[sI] + sPRI]);
1526 // }
1527 // }
1528 //
1529 // sphereProcs[sI] = tmpProcs;
1530 // }
1531 //
1532 // return sphereProcs;
1533 //}
1534 
1535 
1536 //Foam::labelListList Foam::backgroundMeshDecomposition::overlapProcessors
1537 //(
1538 // const point& cc,
1539 // const scalar rSqr
1540 //) const
1541 //{
1542 // DynamicList<label> toCandidateProc;
1543 // label sphereBlockStart(-1);
1544 // label sphereBlockSize(-1);
1545 //
1546 // label nCandidates = 0;
1547 //
1548 // forAll(allBackgroundMeshBounds_, procI)
1549 // {
1550 // // It is assumed that the sphere in question overlaps the source
1551 // // processor, so don't test it, unless includeOwnProcessor is true
1552 // if
1553 // (
1554 // (includeOwnProcessor || procI != Pstream::myProcNo())
1555 // && allBackgroundMeshBounds_[procI].overlaps(cc, rSqr)
1556 // )
1557 // {
1558 // toCandidateProc.append(procI);
1559 //
1560 // nCandidates++;
1561 // }
1562 // }
1563 //
1564 // sphereBlockSize = nCandidates;
1565 // nTotalCandidates += nCandidates;
1566 //
1567 // // Needed for reverseDistribute
1568 // label preDistributionToCandidateProcSize = toCandidateProc.size();
1569 //
1570 // autoPtr<mapDistribute> map(buildMap(toCandidateProc));
1571 //
1572 // map().distribute(testCentres);
1573 // map().distribute(testRadiusSqrs);
1574 //
1575 // // @todo This is faster, but results in more vertices being referred
1577 // boolList sphereOverlapsCandidate(testCentres.size(), false);
1578 //
1579 // // Test candidate spheres on candidate processors
1580 // forAll(testCentres, sI)
1581 // {
1582 // const point& c = testCentres[sI];
1583 // const scalar rSqr = testRadiusSqrs[sI];
1584 //
1585 // const bool flagOverlap = bFTreePtr_().findNearest(c, rSqr).hit();
1586 //
1587 // if (flagOverlap)
1588 // {
1589 // //if (vertexOctree.findAnyOverlap(c, rSqr))
1594 //
1609 // sphereOverlapsCandidate[sI] = true;
1611 // }
1612 // }
1613 //
1614 // map().reverseDistribute
1615 // (
1616 // preDistributionToCandidateProcSize,
1617 // sphereOverlapsCandidate
1618 // );
1619 //
1620 // labelListList sphereProcs(centres.size());
1621 //
1622 // // Working storage for assessing processors
1623 // DynamicList<label> tmpProcs;
1624 //
1625 // forAll(centres, sI)
1626 // {
1627 // tmpProcs.clear();
1628 //
1629 // // Extract the sub list of results for this point
1630 //
1631 // SubList<bool> sphereProcResults
1632 // (
1633 // sphereOverlapsCandidate,
1634 // sphereBlockSize[sI],
1635 // sphereBlockStart[sI]
1636 // );
1637 //
1638 // forAll(sphereProcResults, sPRI)
1639 // {
1640 // if (sphereProcResults[sPRI])
1641 // {
1642 // tmpProcs.append(toCandidateProc[sphereBlockStart[sI] + sPRI]);
1643 // }
1644 // }
1645 //
1646 // sphereProcs[sI] = tmpProcs;
1647 // }
1648 //
1649 // return sphereProcs;
1650 //}
1651 
1652 
1653 // ************************************************************************* //