simple.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     | Website:  https://openfoam.org
    \\  /    A nd           | Copyright (C) 2011-2024 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "simple.H"
#include "addToRunTimeSelectionTable.H"
#include "SortableList.H"
#include "globalIndex.H"
#include "SubField.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{
namespace decompositionMethods
{
    defineTypeNameAndDebug(simple, 0);

    addToRunTimeSelectionTable
    (
        decompositionMethod,
        simple,
        decomposer
    );

    addToRunTimeSelectionTable
    (
        decompositionMethod,
        simple,
        distributor
    );
}
}
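
// The two addToRunTimeSelectionTable entries above register this class
// under the name "simple" in both the decomposer and the distributor
// run-time selection tables, so the method can be selected by name from
// the decomposition dictionary at run time rather than being hard-coded.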


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// assignToProcessorGroup : given nCells cells and nProcGroup processor
// groups to share them among, how do we share them out? Answer: each
// group gets nCells/nProcGroup cells, and the first few groups get one
// extra cell to make up the numbers. This should produce almost perfect
// load balancing.

void Foam::decompositionMethods::simple::assignToProcessorGroup
(
    labelList& processorGroup,
    const label nProcGroup
) const
{
    label jump = processorGroup.size()/nProcGroup;
    label jumpb = jump + 1;
    label fstProcessorGroup = processorGroup.size() - jump*nProcGroup;

    label ind = 0;
    label j = 0;

    // Assign cells to the first few processor groups (those with
    // one extra cell each)
    for (j=0; j<fstProcessorGroup; j++)
    {
        for (label k=0; k<jumpb; k++)
        {
            processorGroup[ind++] = j;
        }
    }

    // ... and now to the 'normal' processor groups
    for (; j<nProcGroup; j++)
    {
        for (label k=0; k<jump; k++)
        {
            processorGroup[ind++] = j;
        }
    }
}
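
// Illustrative check of the split arithmetic above (values assumed for
// the example): with processorGroup.size() = 11 and nProcGroup = 3,
//
//     jump              = 11/3     = 3
//     jumpb             = 3 + 1    = 4
//     fstProcessorGroup = 11 - 3*3 = 2
//
// so groups 0 and 1 receive 4 cells each and group 2 receives 3; group
// sizes therefore never differ by more than one cell.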


void Foam::decompositionMethods::simple::assignToProcessorGroup
(
    labelList& processorGroup,
    const label nProcGroup,
    const labelList& indices,
    const scalarField& weights,
    const scalar summedWeights
) const
{
    // This routine receives the point indices in sorted order.
    // Easiest to explain with an example.
    // E.g. 400 points, sum of weights: 513.
    // With the number of divisions in this direction (nProcGroup) = 4,
    // the bin boundaries fall at multiples of 513/4 (~128).
    // So points with cumulative weight 0..128 go into bin 0,
    // 128..256 into bin 1, etc.
    // Finally, any remaining points go into the last bin (3).

    const scalar jump = summedWeights/nProcGroup;
    const label nProcGroupM1 = nProcGroup - 1;
    scalar sumWeights = 0;
    label ind = 0;
    label j = 0;

    // Assign cells to all except the last group
    for (j=0; j<nProcGroupM1; j++)
    {
        const scalar limit = jump*scalar(j + 1);
        while (sumWeights < limit)
        {
            sumWeights += weights[indices[ind]];
            processorGroup[ind++] = j;
        }
    }

    // Ensure the remainder goes into the last group
    while (ind < processorGroup.size())
    {
        processorGroup[ind++] = nProcGroupM1;
    }
}
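
// Illustrative trace of the weighted split (weights assumed for the
// example): with sorted weights (2 1 3 2 1 3), summedWeights = 12 and
// nProcGroup = 3, jump = 4 and the bin limits are 4 and 8:
//
//     bin 0 fills until sumWeights >= 4  ->  points 0,1,2  (2+1+3 = 6)
//     bin 1 fills until sumWeights >= 8  ->  point 3       (6+2   = 8)
//     bin 2 takes the remainder          ->  points 4,5
//
// A single heavy point can overshoot its bin limit, so the balance is
// only as good as the weight granularity allows.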


Foam::labelList Foam::decompositionMethods::simple::decomposeOneProc
(
    const pointField& points
) const
{
    // Construct a list for the final result
    labelList finalDecomp(points.size());

    labelList processorGroups(points.size());

    labelList pointIndices(points.size());
    forAll(pointIndices, i)
    {
        pointIndices[i] = i;
    }

    const pointField rotatedPoints(rotDelta_ & points);

    // ... and one to take the processor group ids. For each direction
    // we assign the points to groups of processors labelled 0..nX to
    // give a banded structure on the mesh, then construct the actual
    // processor number by treating this as the units part of the
    // processor number.
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::X))
    );

    assignToProcessorGroup(processorGroups, n_.x());

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] = processorGroups[i];
    }


    // Now do the same thing in the Y direction. These processor group
    // numbers add multiples of nX to the processor number (columns)
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Y))
    );

    assignToProcessorGroup(processorGroups, n_.y());

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*processorGroups[i];
    }


    // Finally in the Z direction. Now we add multiples of nX*nY to give
    // layers
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Z))
    );

    assignToProcessorGroup(processorGroups, n_.z());

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*n_.y()*processorGroups[i];
    }

    return finalDecomp;
}
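
// The resulting processor number is thus the mixed-radix index
//
//     proc = ix + n_.x()*iy + n_.x()*n_.y()*iz
//
// where (ix, iy, iz) is the band a point falls into in each direction.
// Illustrative check (n_ assumed for the example): with n_ = (2 3 2),
// a point in bands (1, 2, 1) receives proc = 1 + 2*2 + 2*3*1 = 11, the
// last of the 2*3*2 = 12 subdomains.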


Foam::labelList Foam::decompositionMethods::simple::decomposeOneProc
(
    const pointField& points,
    const scalarField& weights
) const
{
    // Construct a list for the final result
    labelList finalDecomp(points.size());

    labelList processorGroups(points.size());

    labelList pointIndices(points.size());
    forAll(pointIndices, i)
    {
        pointIndices[i] = i;
    }

    const pointField rotatedPoints(rotDelta_ & points);

    // ... and one to take the processor group ids. For each direction
    // we assign the points to groups of processors labelled 0..nX to
    // give a banded structure on the mesh, then construct the actual
    // processor number by treating this as the units part of the
    // processor number.
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::X))
    );

    const scalar summedWeights = sum(weights);
    assignToProcessorGroup
    (
        processorGroups,
        n_.x(),
        pointIndices,
        weights,
        summedWeights
    );

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] = processorGroups[i];
    }


    // Now do the same thing in the Y direction. These processor group
    // numbers add multiples of nX to the processor number (columns)
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Y))
    );

    assignToProcessorGroup
    (
        processorGroups,
        n_.y(),
        pointIndices,
        weights,
        summedWeights
    );

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*processorGroups[i];
    }


    // Finally in the Z direction. Now we add multiples of nX*nY to give
    // layers
    sort
    (
        pointIndices,
        UList<scalar>::less(rotatedPoints.component(vector::Z))
    );

    assignToProcessorGroup
    (
        processorGroups,
        n_.z(),
        pointIndices,
        weights,
        summedWeights
    );

    forAll(points, i)
    {
        finalDecomp[pointIndices[i]] += n_.x()*n_.y()*processorGroups[i];
    }

    return finalDecomp;
}

// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

Foam::decompositionMethods::simple::simple(const dictionary& decompositionDict)
:
    geometric(decompositionDict, typeName)
{}


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

Foam::labelList Foam::decompositionMethods::simple::decompose
(
    const pointField& points
)
{
    if (!Pstream::parRun())
    {
        return decomposeOneProc(points);
    }
    else
    {
        globalIndex globalNumbers(points.size());

        // Collect all points on master
        if (Pstream::master())
        {
            pointField allPoints(globalNumbers.size());

            label nTotalPoints = 0;
            // Master first
            SubField<point>(allPoints, points.size()) = points;
            nTotalPoints += points.size();

            // Add slaves
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                IPstream fromSlave(Pstream::commsTypes::scheduled, slave);
                pointField nbrPoints(fromSlave);
                SubField<point>
                (
                    allPoints,
                    nbrPoints.size(),
                    nTotalPoints
                ) = nbrPoints;
                nTotalPoints += nbrPoints.size();
            }

            // Decompose
            labelList finalDecomp(decomposeOneProc(allPoints));

            // Send each processor its part back
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                OPstream toSlave(Pstream::commsTypes::scheduled, slave);
                toSlave << SubField<label>
                (
                    finalDecomp,
                    globalNumbers.localSize(slave),
                    globalNumbers.offset(slave)
                );
            }

            // Get my own part
            finalDecomp.setSize(points.size());

            return finalDecomp;
        }
        else
        {
            // Send my points to the master
            {
                OPstream toMaster
                (
                    Pstream::commsTypes::scheduled,
                    Pstream::masterNo()
                );
                toMaster << points;
            }

            // Receive back my part of the decomposition
            IPstream fromMaster
            (
                Pstream::commsTypes::scheduled,
                Pstream::masterNo()
            );
            labelList finalDecomp(fromMaster);

            return finalDecomp;
        }
    }
}
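
// The parallel branch above is a gather/decompose/scatter pattern: each
// processor's points occupy a contiguous slice of the master's allPoints
// field, and globalIndex records where every slice starts. A small
// illustrative trace (local sizes assumed for the example):
//
//     // local sizes per processor: 3, 4, 2
//     globalIndex globalNumbers(points.size());
//     globalNumbers.size();        // 9 - global total
//     globalNumbers.offset(1);     // 3 - where slave 1's slice starts
//     globalNumbers.localSize(1);  // 4 - length of slave 1's slice
//
// so SubField<label>(finalDecomp, localSize(slave), offset(slave)) is
// exactly the slice of the master's decomposition belonging to that
// slave.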


Foam::labelList Foam::decompositionMethods::simple::decompose
(
    const pointField& points,
    const scalarField& weights
)
{
    checkWeights(points, weights);

    if (!Pstream::parRun())
    {
        return decomposeOneProc(points, weights);
    }
    else
    {
        globalIndex globalNumbers(points.size());

        // Collect all points and weights on master
        if (Pstream::master())
        {
            pointField allPoints(globalNumbers.size());
            scalarField allWeights(allPoints.size());

            label nTotalPoints = 0;
            // Master first
            SubField<point>(allPoints, points.size()) = points;
            SubField<scalar>(allWeights, points.size()) = weights;
            nTotalPoints += points.size();

            // Add slaves
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                IPstream fromSlave(Pstream::commsTypes::scheduled, slave);
                pointField nbrPoints(fromSlave);
                scalarField nbrWeights(fromSlave);
                SubField<point>
                (
                    allPoints,
                    nbrPoints.size(),
                    nTotalPoints
                ) = nbrPoints;
                SubField<scalar>
                (
                    allWeights,
                    nbrWeights.size(),
                    nTotalPoints
                ) = nbrWeights;
                nTotalPoints += nbrPoints.size();
            }

            // Decompose
            labelList finalDecomp(decomposeOneProc(allPoints, allWeights));

            // Send each processor its part back
            for (int slave=1; slave<Pstream::nProcs(); slave++)
            {
                OPstream toSlave(Pstream::commsTypes::scheduled, slave);
                toSlave << SubField<label>
                (
                    finalDecomp,
                    globalNumbers.localSize(slave),
                    globalNumbers.offset(slave)
                );
            }

            // Get my own part
            finalDecomp.setSize(points.size());

            return finalDecomp;
        }
        else
        {
            // Send my points and weights to the master
            {
                OPstream toMaster
                (
                    Pstream::commsTypes::scheduled,
                    Pstream::masterNo()
                );
                toMaster << points << weights;
            }

            // Receive back my part of the decomposition
            IPstream fromMaster
            (
                Pstream::commsTypes::scheduled,
                Pstream::masterNo()
            );
            labelList finalDecomp(fromMaster);

            return finalDecomp;
        }
    }
}


// ************************************************************************* //