simpleGeomDecomp.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     | Website:  https://openfoam.org
    \\  /    A nd           | Copyright (C) 2011-2018 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "simpleGeomDecomp.H"
#include "addToRunTimeSelectionTable.H"
#include "SortableList.H"
#include "globalIndex.H"
#include "SubField.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{
    defineTypeNameAndDebug(simpleGeomDecomp, 0);

    addToRunTimeSelectionTable
    (
        decompositionMethod,
        simpleGeomDecomp,
        dictionary
    );
}

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// assignToProcessorGroup : given nCells cells and nProcGroup processor
// groups to share them, how do we share them out? Answer: each group
// gets nCells/nProcGroup cells, and the first few get one
// extra to make up the numbers. This should produce almost
// perfect load balancing.
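//
// A worked example with illustrative numbers: for nCells = 10 and
// nProcGroup = 3, jump = 10/3 = 3 and fstProcessorGroup = 10 - 3*3 = 1,
// so group 0 receives 4 cells and groups 1 and 2 receive 3 cells each.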

void Foam::simpleGeomDecomp::assignToProcessorGroup
(
    labelList& processorGroup,
    const label nProcGroup
) const
{
    label jump = processorGroup.size()/nProcGroup;
    label jumpb = jump + 1;
    label fstProcessorGroup = processorGroup.size() - jump*nProcGroup;

    label ind = 0;
    label j = 0;

    // Assign cells to the first few processor groups (those with
    // one extra cell each)
    for (j=0; j<fstProcessorGroup; j++)
    {
        for (label k=0; k<jumpb; k++)
        {
            processorGroup[ind++] = j;
        }
    }

    // ... and now to the 'normal' processor groups
    for (; j<nProcGroup; j++)
    {
        for (label k=0; k<jump; k++)
        {
            processorGroup[ind++] = j;
        }
    }
}


void Foam::simpleGeomDecomp::assignToProcessorGroup
(
    labelList& processorGroup,
    const label nProcGroup,
    const labelList& indices,
    const scalarField& weights,
    const scalar summedWeights
) const
{
    // This routine gets the sorted points.
    // Easiest to explain with an example.
    // E.g. 400 points, sum of weights: 513.
    // With the number of divisions in this direction (nProcGroup) at 4,
    // the splits fall every 513/4 ~= 128 units of summed weight.
    // So summed weight from 0..128 goes into bin 0,
    // summed weight from 128..256 goes into bin 1,
    // etc.
    // Finally any remaining points go into the last bin (3).
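    //
    // A smaller illustrative trace: sorted weights (1.0, 0.5, 0.8, 0.5),
    // summedWeights = 2.8 and nProcGroup = 2 give jump = 1.4. The loop
    // below checks the running sum before each addition, so bin 0 takes
    // points 0 and 1 (sum 1.0, then 1.5, which stops the loop) and
    // points 2 and 3 fall through to the last bin. A bin can therefore
    // overshoot its limit by at most one point's weight.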

    const scalar jump = summedWeights/nProcGroup;
    const label nProcGroupM1 = nProcGroup - 1;
    scalar sumWeights = 0;
    label ind = 0;
    label j = 0;

    // Assign cells to all except the last group
    for (j=0; j<nProcGroupM1; j++)
    {
        const scalar limit = jump*scalar(j + 1);
        while (sumWeights < limit)
        {
            sumWeights += weights[indices[ind]];
            processorGroup[ind++] = j;
        }
    }

    // Ensure the last group includes all remaining points
    while (ind < processorGroup.size())
    {
        processorGroup[ind++] = nProcGroupM1;
    }
}


Foam::labelList Foam::simpleGeomDecomp::decomposeOneProc
(
    const pointField& points
) const
{
    // Construct a list for the final result
    labelList finalDecomp(points.size());

    // ... and one to take the processor group IDs
    labelList processorGroups(points.size());

    labelList pointIndices(points.size());
    forAll(pointIndices, i)
    {
        pointIndices[i] = i;
    }

    const pointField rotatedPoints(rotDelta_ & points);

    // For each direction we assign the points to groups of processors
    // labelled 0..nX, giving a banded structure on the mesh. The actual
    // processor number is then constructed by treating this as the
    // units part of the processor number.
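    //
    // For example (derived from the sums below): with n_ = (2 3 2) the
    // final processor number works out as ix + 2*iy + 6*iz, where ix, iy
    // and iz are the band indices assigned in the x, y and z passes.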
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::X))
    );

    assignToProcessorGroup(processorGroups, n_.x());

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] = processorGroups[i];
    }


    // Now do the same thing in the Y direction. These processor group
    // numbers add multiples of nX to the processor number (columns)
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Y))
    );

    assignToProcessorGroup(processorGroups, n_.y());

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*processorGroups[i];
    }


    // Finally in the Z direction. Now we add multiples of nX*nY to give
    // layers
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Z))
    );

    assignToProcessorGroup(processorGroups, n_.z());

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*n_.y()*processorGroups[i];
    }

    return finalDecomp;
}


Foam::labelList Foam::simpleGeomDecomp::decomposeOneProc
(
    const pointField& points,
    const scalarField& weights
) const
{
    // Construct a list for the final result
    labelList finalDecomp(points.size());

    // ... and one to take the processor group IDs
    labelList processorGroups(points.size());

    labelList pointIndices(points.size());
    forAll(pointIndices, i)
    {
        pointIndices[i] = i;
    }

    const pointField rotatedPoints(rotDelta_ & points);

    // For each direction we assign the points to groups of processors
    // labelled 0..nX, giving a banded structure on the mesh. The actual
    // processor number is then constructed by treating this as the
    // units part of the processor number.
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::X))
    );

    const scalar summedWeights = sum(weights);
    assignToProcessorGroup
    (
        processorGroups,
        n_.x(),
        pointIndices,
        weights,
        summedWeights
    );

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] = processorGroups[i];
    }


    // Now do the same thing in the Y direction. These processor group
    // numbers add multiples of nX to the processor number (columns)
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Y))
    );

    assignToProcessorGroup
    (
        processorGroups,
        n_.y(),
        pointIndices,
        weights,
        summedWeights
    );

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*processorGroups[i];
    }


    // Finally in the Z direction. Now we add multiples of nX*nY to give
    // layers
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Z))
    );

    assignToProcessorGroup
    (
        processorGroups,
        n_.z(),
        pointIndices,
        weights,
        summedWeights
    );

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*n_.y()*processorGroups[i];
    }

    return finalDecomp;
}


// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

Foam::simpleGeomDecomp::simpleGeomDecomp(const dictionary& decompositionDict)
:
    geomDecomp(decompositionDict, typeName)
{}


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

Foam::labelList Foam::simpleGeomDecomp::decompose
(
    const pointField& points
)
{
    if (!Pstream::parRun())
    {
        return decomposeOneProc(points);
    }
    else
    {
        globalIndex globalNumbers(points.size());

        // Collect all points on master
        if (Pstream::master())
        {
            pointField allPoints(globalNumbers.size());

            label nTotalPoints = 0;

            // Master first
            SubField<point>(allPoints, points.size()) = points;
            nTotalPoints += points.size();

            // Add slaves
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                IPstream fromSlave(Pstream::commsTypes::scheduled, slave);
                pointField nbrPoints(fromSlave);
                SubField<point>
                (
                    allPoints,
                    nbrPoints.size(),
                    nTotalPoints
                ) = nbrPoints;
                nTotalPoints += nbrPoints.size();
            }

            // Decompose
            labelList finalDecomp(decomposeOneProc(allPoints));

            // Send each slave its part back
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                OPstream toSlave(Pstream::commsTypes::scheduled, slave);
                toSlave << SubField<label>
                (
                    finalDecomp,
                    globalNumbers.localSize(slave),
                    globalNumbers.offset(slave)
                );
            }

            // Get my own part
            finalDecomp.setSize(points.size());

            return finalDecomp;
        }
        else
        {
            // Send my points
            {
                OPstream toMaster
                (
                    Pstream::commsTypes::scheduled,
                    Pstream::masterNo()
                );
                toMaster << points;
            }

            // Receive back decomposition
            IPstream fromMaster
            (
                Pstream::commsTypes::scheduled,
                Pstream::masterNo()
            );
            labelList finalDecomp(fromMaster);

            return finalDecomp;
        }
    }
}


Foam::labelList Foam::simpleGeomDecomp::decompose
(
    const pointField& points,
    const scalarField& weights
)
{
    if (!Pstream::parRun())
    {
        return decomposeOneProc(points, weights);
    }
    else
    {
        globalIndex globalNumbers(points.size());

        // Collect all points and weights on master
        if (Pstream::master())
        {
            pointField allPoints(globalNumbers.size());
            scalarField allWeights(allPoints.size());

            label nTotalPoints = 0;

            // Master first
            SubField<point>(allPoints, points.size()) = points;
            SubField<scalar>(allWeights, points.size()) = weights;
            nTotalPoints += points.size();

            // Add slaves
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                IPstream fromSlave(Pstream::commsTypes::scheduled, slave);
                pointField nbrPoints(fromSlave);
                scalarField nbrWeights(fromSlave);
                SubField<point>
                (
                    allPoints,
                    nbrPoints.size(),
                    nTotalPoints
                ) = nbrPoints;
                SubField<scalar>
                (
                    allWeights,
                    nbrWeights.size(),
                    nTotalPoints
                ) = nbrWeights;
                nTotalPoints += nbrPoints.size();
            }

            // Decompose
            labelList finalDecomp(decomposeOneProc(allPoints, allWeights));

            // Send each slave its part back
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                OPstream toSlave(Pstream::commsTypes::scheduled, slave);
                toSlave << SubField<label>
                (
                    finalDecomp,
                    globalNumbers.localSize(slave),
                    globalNumbers.offset(slave)
                );
            }

            // Get my own part
            finalDecomp.setSize(points.size());

            return finalDecomp;
        }
        else
        {
            // Send my points and weights
            {
                OPstream toMaster
                (
                    Pstream::commsTypes::scheduled,
                    Pstream::masterNo()
                );
                toMaster << points << weights;
            }

            // Receive back decomposition
            IPstream fromMaster
            (
                Pstream::commsTypes::scheduled,
                Pstream::masterNo()
            );
            labelList finalDecomp(fromMaster);

            return finalDecomp;
        }
    }
}


// ************************************************************************* //
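
For reference, a minimal sketch of how this decomposition method is typically
selected from a case's system/decomposeParDict (the numbers are illustrative,
not taken from this file):

    numberOfSubdomains  4;

    method              simple;

    simpleCoeffs
    {
        n               (2 2 1);    // divisions in x, y and z; their
                                    // product must equal numberOfSubdomains
        delta           0.001;      // small coordinate perturbation used
                                    // by geomDecomp when forming rotDelta_
    }

decompositionMethod::New reads this dictionary and, via the run-time selection
table populated at the top of this file, constructs a simpleGeomDecomp whose
decompose() then assigns each point a processor number as shown above.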