gatherScatterList.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 2011-2016 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

Description
    Gather data from all processors onto a single processor according to some
    communication schedule (usually linear-to-master or tree-to-master).
    The gathered data will be a list in which element procID holds the data
    from processor procID. Before calling, every processor should insert its
    value into Values[UPstream::myProcNo(comm)].
    Note: after the gather, every processor only knows its own data and that
    of the processors below it. Only the 'master' of the communication
    schedule holds a fully filled List. Use scatterList to distribute the
    data.
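
    For example, a minimal usage sketch (localValue is a placeholder for
    whatever each processor contributes):
    \code
        List<scalar> values(UPstream::nProcs());
        values[UPstream::myProcNo()] = localValue;

        Pstream::gatherList(values);   // master now holds every entry
        Pstream::scatterList(values);  // now all processors hold every entry
    \endcode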

\*---------------------------------------------------------------------------*/

#include "IPstream.H"
#include "OPstream.H"
#include "contiguous.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{

// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

template<class T>
void Pstream::gatherList
(
    const List<UPstream::commsStruct>& comms,
    List<T>& Values,
    const int tag,
    const label comm
)
{
    if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
    {
        if (Values.size() != UPstream::nProcs(comm))
        {
            FatalErrorInFunction
                << "Size of list:" << Values.size()
                << " does not equal the number of processors:"
                << UPstream::nProcs(comm)
                << Foam::abort(FatalError);
        }

        // Get my communication order
        const commsStruct& myComm = comms[UPstream::myProcNo(comm)];

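        // myComm.below() holds my direct children in the schedule, and
        // comms[p].allBelow() the complete sub-tree beneath processor p.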
        // Receive from my downstairs neighbours
        forAll(myComm.below(), belowI)
        {
            label belowID = myComm.below()[belowI];
            const labelList& belowLeaves = comms[belowID].allBelow();

            if (contiguous<T>())
            {
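                // Contiguous data: receive belowID's own value together
                // with its entire sub-tree in a single raw-byte message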
                List<T> receivedValues(belowLeaves.size() + 1);

                UIPstream::read
                (
                    UPstream::scheduled,
                    belowID,
                    reinterpret_cast<char*>(receivedValues.begin()),
                    receivedValues.byteSize(),
                    tag,
                    comm
                );
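
                // Flat buffer layout: element 0 is belowID's own value,
                // elements 1.. are the values from its sub-tree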
                Values[belowID] = receivedValues[0];

                forAll(belowLeaves, leafI)
                {
                    Values[belowLeaves[leafI]] = receivedValues[leafI + 1];
                }
            }
            else
            {
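                // Non-contiguous data: deserialise values one at a time
                // from the stream, belowID's own value first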
                IPstream fromBelow(UPstream::scheduled, belowID, 0, tag, comm);
                fromBelow >> Values[belowID];

                if (debug & 2)
                {
                    Pout<< " received through "
                        << belowID << " data from:" << belowID
                        << " data:" << Values[belowID] << endl;
                }

                // Receive from all other processors below belowID
                forAll(belowLeaves, leafI)
                {
                    label leafID = belowLeaves[leafI];
                    fromBelow >> Values[leafID];

                    if (debug & 2)
                    {
                        Pout<< " received through "
                            << belowID << " data from:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                }
            }
        }

        // Send up from Values:
        // - my own value first
        // - all belowLeaves next
        if (myComm.above() != -1)
        {
            const labelList& belowLeaves = myComm.allBelow();

            if (debug & 2)
            {
                Pout<< " sending to " << myComm.above()
                    << " data from me:" << UPstream::myProcNo(comm)
                    << " data:" << Values[UPstream::myProcNo(comm)] << endl;
            }

            if (contiguous<T>())
            {
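                // Pack into one flat buffer, in the same order in which the
                // parent unpacks: own value first, then the sub-tree values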
                List<T> sendingValues(belowLeaves.size() + 1);
                sendingValues[0] = Values[UPstream::myProcNo(comm)];

                forAll(belowLeaves, leafI)
                {
                    sendingValues[leafI + 1] = Values[belowLeaves[leafI]];
                }

                UOPstream::write
                (
                    UPstream::scheduled,
                    myComm.above(),
                    reinterpret_cast<const char*>(sendingValues.begin()),
                    sendingValues.byteSize(),
                    tag,
                    comm
                );
            }
            else
            {
                OPstream toAbove
                (
                    UPstream::scheduled,
                    myComm.above(),
                    0,
                    tag,
                    comm
                );
                toAbove << Values[UPstream::myProcNo(comm)];

                forAll(belowLeaves, leafI)
                {
                    label leafID = belowLeaves[leafI];

                    if (debug & 2)
                    {
                        Pout<< " sending to "
                            << myComm.above() << " data from:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                    toAbove << Values[leafID];
                }
            }
        }
    }
}


template<class T>
void Pstream::gatherList(List<T>& Values, const int tag, const label comm)
{
    // Below the nProcsSimpleSum threshold use the linear schedule,
    // otherwise the tree schedule
    if (UPstream::nProcs(comm) < UPstream::nProcsSimpleSum)
    {
        gatherList(UPstream::linearCommunication(comm), Values, tag, comm);
    }
    else
    {
        gatherList(UPstream::treeCommunication(comm), Values, tag, comm);
    }
}


template<class T>
void Pstream::scatterList
(
    const List<UPstream::commsStruct>& comms,
    List<T>& Values,
    const int tag,
    const label comm
)
{
    if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
    {
        if (Values.size() != UPstream::nProcs(comm))
        {
            FatalErrorInFunction
                << "Size of list:" << Values.size()
                << " does not equal the number of processors:"
                << UPstream::nProcs(comm)
                << Foam::abort(FatalError);
        }

        // Get my communication order
        const commsStruct& myComm = comms[UPstream::myProcNo(comm)];

        // Receive from above
        if (myComm.above() != -1)
        {
            const labelList& notBelowLeaves = myComm.allNotBelow();
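
            // allNotBelow() lists the processors outside my own sub-tree,
            // i.e. exactly the entries this processor still lacks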
            if (contiguous<T>())
            {
                List<T> receivedValues(notBelowLeaves.size());

                UIPstream::read
                (
                    UPstream::scheduled,
                    myComm.above(),
                    reinterpret_cast<char*>(receivedValues.begin()),
                    receivedValues.byteSize(),
                    tag,
                    comm
                );

                forAll(notBelowLeaves, leafI)
                {
                    Values[notBelowLeaves[leafI]] = receivedValues[leafI];
                }
            }
            else
            {
                IPstream fromAbove
                (
                    UPstream::scheduled,
                    myComm.above(),
                    0,
                    tag,
                    comm
                );

                forAll(notBelowLeaves, leafI)
                {
                    label leafID = notBelowLeaves[leafI];
                    fromAbove >> Values[leafID];

                    if (debug)
                    {
                        Pout<< " received through "
                            << myComm.above() << " data for:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                }
            }
        }

        // Send to my downstairs neighbours
        forAllReverse(myComm.below(), belowI)
        {
            label belowID = myComm.below()[belowI];
            const labelList& notBelowLeaves = comms[belowID].allNotBelow();

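            // Forward everything outside belowID's sub-tree; after the
            // gather it already holds the values from its own sub-tree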
            if (contiguous<T>())
            {
                List<T> sendingValues(notBelowLeaves.size());

                forAll(notBelowLeaves, leafI)
                {
                    sendingValues[leafI] = Values[notBelowLeaves[leafI]];
                }

                UOPstream::write
                (
                    UPstream::scheduled,
                    belowID,
                    reinterpret_cast<const char*>(sendingValues.begin()),
                    sendingValues.byteSize(),
                    tag,
                    comm
                );
            }
            else
            {
                OPstream toBelow(UPstream::scheduled, belowID, 0, tag, comm);

                // Send data destined for all other processors below belowID
                forAll(notBelowLeaves, leafI)
                {
                    label leafID = notBelowLeaves[leafI];
                    toBelow << Values[leafID];

                    if (debug)
                    {
                        Pout<< " sent through "
                            << belowID << " data for:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                }
            }
        }
    }
}


template<class T>
void Pstream::scatterList(List<T>& Values, const int tag, const label comm)
{
    if (UPstream::nProcs(comm) < UPstream::nProcsSimpleSum)
    {
        scatterList(UPstream::linearCommunication(comm), Values, tag, comm);
    }
    else
    {
        scatterList(UPstream::treeCommunication(comm), Values, tag, comm);
    }
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

} // End namespace Foam

// ************************************************************************* //