UPstream.C
1 /*---------------------------------------------------------------------------*\
2  =========                 |
3  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
4   \\    /   O peration     | Website:  https://openfoam.org
5    \\  /    A nd           | Copyright (C) 2011-2023 OpenFOAM Foundation
6     \\/     M anipulation  |
7 -------------------------------------------------------------------------------
8 License
9  This file is part of OpenFOAM.
10 
11  OpenFOAM is free software: you can redistribute it and/or modify it
12  under the terms of the GNU General Public License as published by
13  the Free Software Foundation, either version 3 of the License, or
14  (at your option) any later version.
15 
16  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19  for more details.
20 
21  You should have received a copy of the GNU General Public License
22  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 
24 \*---------------------------------------------------------------------------*/
25 
26 #include "UPstream.H"
27 #include "PstreamReduceOps.H"
28 #include "OSspecific.H"
29 #include "PstreamGlobals.H"
30 #include "SubList.H"
31 #include "allReduce.H"
32 
33 #include <mpi.h>
34 
35 #include <cstring>
36 #include <cstdlib>
37 #include <csignal>
38 
39 #if defined(WM_SP)
40  #define MPI_SCALAR MPI_FLOAT
41 #elif defined(WM_DP)
42  #define MPI_SCALAR MPI_DOUBLE
43 #elif defined(WM_LP)
44  #define MPI_SCALAR MPI_LONG_DOUBLE
45 #endif
46 
47 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
48 
49 // NOTE:
50 // Valid parallel options vary between implementations, but flag common ones.
51 // If they are not removed by MPI_Init(), the subsequent argument processing
52 // will notice that they are wrong.
53 void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
54 {
55  validParOptions.insert("np", "");
56  validParOptions.insert("p4pg", "PI file");
57  validParOptions.insert("p4wd", "directory");
58  validParOptions.insert("p4amslave", "");
59  validParOptions.insert("p4yourname", "hostname");
60  validParOptions.insert("machinefile", "machine file");
61 }
62 
63 
64 bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
65 {
66  // MPI_Init(&argc, &argv);
67  int provided_thread_support;
68  MPI_Init_thread
69  (
70  &argc,
71  &argv,
72  (
73  needsThread
74  ? MPI_THREAD_MULTIPLE
75  : MPI_THREAD_SINGLE
76  ),
77  &provided_thread_support
78  );
79 
80  // int numprocs;
81  // MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
82  // int myRank;
83  // MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
84 
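 // Split MPI_COMM_WORLD with a single colour (1), keyed by the world rank:
 // the resulting PstreamGlobals::MPI_COMM_FOAM is an ordered duplicate of
 // MPI_COMM_WORLD, so subsequent OpenFOAM communication does not use the
 // world communicator directly.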
85  int myGlobalRank;
86  MPI_Comm_rank(MPI_COMM_WORLD, &myGlobalRank);
87  MPI_Comm_split
88  (
89  MPI_COMM_WORLD,
90  1,
91  myGlobalRank,
92  &PstreamGlobals::MPI_COMM_FOAM
93  );
94 
95  int numprocs;
96  MPI_Comm_size(PstreamGlobals::MPI_COMM_FOAM, &numprocs);
97  int myRank;
98  MPI_Comm_rank(PstreamGlobals::MPI_COMM_FOAM, &myRank);
99 
100  if (debug)
101  {
102  Pout<< "UPstream::init : initialised with numProcs:" << numprocs
103  << " myRank:" << myRank << endl;
104  }
105 
106  if (numprocs <= 1)
107  {
108  FatalErrorInFunction
109  << "bool IPstream::init(int& argc, char**& argv) : "
110  "attempt to run parallel on 1 processor"
111  << Foam::abort(FatalError);
112  }
113 
114 
115  // Initialise parallel structure
116  setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);
117 
118  #ifndef SGIMPI
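 // The buffer attached here backs MPI's buffered-send mode (MPI_Bsend),
 // which the blocking Pstream transfers rely on; it is detached and freed
 // again in UPstream::exit below.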
119  string bufferSizeName = getEnv("MPI_BUFFER_SIZE");
120 
121  if (bufferSizeName.size())
122  {
123  int bufferSize = atoi(bufferSizeName.c_str());
124 
125  if (bufferSize)
126  {
127  MPI_Buffer_attach(new char[bufferSize], bufferSize);
128  }
129  }
130  else
131  {
132  WarningInFunction
133  << "UPstream::init(int& argc, char**& argv) : "
134  << "environment variable MPI_BUFFER_SIZE not defined"
135  << Foam::endl;
136  }
137  #endif
138 
139  // int processorNameLen;
140  // char processorName[MPI_MAX_PROCESSOR_NAME];
141  //
142  // MPI_Get_processor_name(processorName, &processorNameLen);
143  // processorName[processorNameLen] = '\0';
144  // Pout<< "Processor name:" << processorName << endl;
145 
146  return true;
147 }
148 
149 
150 void Foam::UPstream::exit(int errnum)
151 {
152  if (debug)
153  {
154  Pout<< "UPstream::exit." << endl;
155  }
156 
157  #ifndef SGIMPI
158  int size;
159  char* buff;
160  MPI_Buffer_detach(&buff, &size);
161  delete[] buff;
162  #endif
163 
164  if (PstreamGlobals::outstandingRequests_.size())
165  {
166  label n = PstreamGlobals::outstandingRequests_.size();
167  PstreamGlobals::outstandingRequests_.clear();
168 
169  WarningInFunction
170  << "There are still " << n << " outstanding MPI_Requests." << endl
171  << "This means that your code exited before doing a"
172  << " UPstream::waitRequests()." << endl
173  << "This should not happen for a normal code exit."
174  << endl;
175  }
176 
177  // Clean mpi communicators
178  forAll(myProcNo_, communicator)
179  {
180  if (myProcNo_[communicator] != -1)
181  {
182  freePstreamCommunicator(communicator);
183  }
184  }
185 
186  if (errnum == 0)
187  {
188  MPI_Finalize();
189  ::exit(errnum);
190  }
191  else
192  {
193  MPI_Abort(PstreamGlobals::MPI_COMM_FOAM, errnum);
194  }
195 }
196 
197 
198 void Foam::UPstream::abort()
199 {
200  MPI_Abort(PstreamGlobals::MPI_COMM_FOAM, 1);
201 }
202 
203 
204 void Foam::reduce
205 (
206  scalar& Value,
207  const sumOp<scalar>& bop,
208  const int tag,
209  const label communicator
210 )
211 {
212  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
213  {
214  Pout<< "** reducing:" << Value << " with comm:" << communicator
215  << " warnComm:" << UPstream::warnComm
216  << endl;
217  error::printStack(Pout);
218  }
219  allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
220 }
221 
222 
223 void Foam::reduce
224 (
225  scalar& Value,
226  const minOp<scalar>& bop,
227  const int tag,
228  const label communicator
229 )
230 {
231  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
232  {
233  Pout<< "** reducing:" << Value << " with comm:" << communicator
234  << " warnComm:" << UPstream::warnComm
235  << endl;
236  error::printStack(Pout);
237  }
238  allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, communicator);
239 }
240 
241 
242 void Foam::reduce
243 (
244  vector2D& Value,
245  const sumOp<vector2D>& bop,
246  const int tag,
247  const label communicator
248 )
249 {
250  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
251  {
252  Pout<< "** reducing:" << Value << " with comm:" << communicator
253  << " warnComm:" << UPstream::warnComm
254  << endl;
255  error::printStack(Pout);
256  }
257  allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
258 }
259 
260 
261 void Foam::sumReduce
262 (
263  scalar& Value,
264  label& Count,
265  const int tag,
266  const label communicator
267 )
268 {
269  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
270  {
271  Pout<< "** reducing:" << Value << " with comm:" << communicator
272  << " warnComm:" << UPstream::warnComm
273  << endl;
274  error::printStack(Pout);
275  }
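 // Pack the value and the count into a single vector2D so that both are
 // combined in one reduction instead of two separate MPI calls.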
276  vector2D twoScalars(Value, scalar(Count));
277  reduce(twoScalars, sumOp<vector2D>(), tag, communicator);
278 
279  Value = twoScalars.x();
280  Count = twoScalars.y();
281 }
282 
283 
284 void Foam::reduce
285 (
286  scalar& Value,
287  const sumOp<scalar>& bop,
288  const int tag,
289  const label communicator,
290  label& requestID
291 )
292 {
293 #ifdef MPIX_COMM_TYPE_SHARED
294  // Assume mpich2 with non-blocking collectives extensions. Once mpi3
295  // is available this will change.
296  MPI_Request request;
297  scalar v = Value;
298  MPIX_Ireduce
299  (
300  &v,
301  &Value,
302  1,
303  MPI_SCALAR,
304  MPI_SUM,
305  0, // root
306  PstreamGlobals::MPICommunicators_[communicator],
307  &request
308  );
309 
310  requestID = PstreamGlobals::outstandingRequests_.size();
311  PstreamGlobals::outstandingRequests_.append(request);
312 
313  if (UPstream::debug)
314  {
315  Pout<< "UPstream::allocateRequest for non-blocking reduce"
316  << " : request:" << requestID
317  << endl;
318  }
319 #else
320  // Non-blocking not yet implemented in mpi
321  reduce(Value, bop, tag, communicator);
322  requestID = -1;
323 #endif
324 }
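// Usage sketch (illustrative only; localValue and comm are example names,
// not part of this file): the requestID returned by the overload above is
// meant to be handed back to UPstream::waitRequest once the reduced value
// is actually needed, e.g.
//
//     scalar localValue = 1.0;
//     const label comm = UPstream::worldComm;
//     label requestID = -1;
//     Foam::reduce(localValue, sumOp<scalar>(), UPstream::msgType(), comm, requestID);
//     // ... overlap unrelated work here ...
//     if (requestID != -1)
//     {
//         UPstream::waitRequest(requestID);
//     }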
325 
326 
327 void Foam::UPstream::allToAll
328 (
329  const labelUList& sendData,
330  labelUList& recvData,
331  const label communicator
332 )
333 {
334  label np = nProcs(communicator);
335 
336  if (sendData.size() != np || recvData.size() != np)
337  {
338  FatalErrorInFunction
339  << "Size of sendData " << sendData.size()
340  << " or size of recvData " << recvData.size()
341  << " is not equal to the number of processors in the domain "
342  << np
343  << Foam::abort(FatalError);
344  }
345 
346  if (!UPstream::parRun())
347  {
348  recvData.deepCopy(sendData);
349  }
350  else
351  {
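 // Labels are exchanged as raw bytes (sizeof(label) bytes per rank, MPI_BYTE),
 // so the call is independent of whether label is 32-bit or 64-bit.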
352  if
353  (
354  MPI_Alltoall
355  (
356  const_cast<label*>(sendData.begin()),
357  sizeof(label),
358  MPI_BYTE,
359  recvData.begin(),
360  sizeof(label),
361  MPI_BYTE,
362  PstreamGlobals::MPICommunicators_[communicator]
363  )
364  )
365  {
366  FatalErrorInFunction
367  << "MPI_Alltoall failed for " << sendData
368  << " on communicator " << communicator
369  << Foam::abort(FatalError);
370  }
371  }
372 }
373 
374 
375 void Foam::UPstream::allToAll
376 (
377  const char* sendData,
378  const UList<int>& sendSizes,
379  const UList<int>& sendOffsets,
380 
381  char* recvData,
382  const UList<int>& recvSizes,
383  const UList<int>& recvOffsets,
384 
385  const label communicator
386 )
387 {
388  label np = nProcs(communicator);
389 
390  if
391  (
392  sendSizes.size() != np
393  || sendOffsets.size() != np
394  || recvSizes.size() != np
395  || recvOffsets.size() != np
396  )
397  {
398  FatalErrorInFunction
399  << "Size of sendSize " << sendSizes.size()
400  << ", sendOffsets " << sendOffsets.size()
401  << ", recvSizes " << recvSizes.size()
402  << " or recvOffsets " << recvOffsets.size()
403  << " is not equal to the number of processors in the domain "
404  << np
405  << Foam::abort(FatalError);
406  }
407 
408  if (!UPstream::parRun())
409  {
410  if (recvSizes[0] != sendSizes[0])
411  {
412  FatalErrorInFunction
413  << "Bytes to send " << sendSizes[0]
414  << " does not equal bytes to receive " << recvSizes[0]
415  << Foam::abort(FatalError);
416  }
417  memmove(recvData, &sendData[sendOffsets[0]], recvSizes[0]);
418  }
419  else
420  {
421  if
422  (
423  MPI_Alltoallv
424  (
425  const_cast<char*>(sendData),
426  const_cast<int*>(sendSizes.begin()),
427  const_cast<int*>(sendOffsets.begin()),
428  MPI_BYTE,
429  recvData,
430  const_cast<int*>(recvSizes.begin()),
431  const_cast<int*>(recvOffsets.begin()),
432  MPI_BYTE,
433  PstreamGlobals::MPICommunicators_[communicator]
434  )
435  )
436  {
437  FatalErrorInFunction
438  << "MPI_Alltoallv failed for sendSizes " << sendSizes
439  << " recvSizes " << recvSizes
440  << " communicator " << communicator
441  << Foam::abort(FatalError);
442  }
443  }
444 }
445 
446 
447 void Foam::UPstream::gather
448 (
449  const char* sendData,
450  int sendSize,
451 
452  char* recvData,
453  const UList<int>& recvSizes,
454  const UList<int>& recvOffsets,
455  const label communicator
456 )
457 {
458  label np = nProcs(communicator);
459 
460  if
461  (
462  UPstream::master(communicator)
463  && (recvSizes.size() != np || recvOffsets.size() < np)
464  )
465  {
466  // Note: allow recvOffsets to be e.g. 1 larger than np so we
467  // can easily loop over the result
468 
469  FatalErrorInFunction
470  << "Size of recvSizes " << recvSizes.size()
471  << " or recvOffsets " << recvOffsets.size()
472  << " is not equal to the number of processors in the domain "
473  << np
474  << Foam::abort(FatalError);
475  }
476 
477  if (!UPstream::parRun())
478  {
479  memmove(recvData, sendData, sendSize);
480  }
481  else
482  {
483  if
484  (
485  MPI_Gatherv
486  (
487  const_cast<char*>(sendData),
488  sendSize,
489  MPI_BYTE,
490  recvData,
491  const_cast<int*>(recvSizes.begin()),
492  const_cast<int*>(recvOffsets.begin()),
493  MPI_BYTE,
494  0,
495  MPI_Comm(PstreamGlobals::MPICommunicators_[communicator])
496  )
497  )
498  {
499  FatalErrorInFunction
500  << "MPI_Gatherv failed for sendSize " << sendSize
501  << " recvSizes " << recvSizes
502  << " communicator " << communicator
503  << Foam::abort(FatalError);
504  }
505  }
506 }
507 
508 
509 void Foam::UPstream::scatter
510 (
511  const char* sendData,
512  const UList<int>& sendSizes,
513  const UList<int>& sendOffsets,
514 
515  char* recvData,
516  int recvSize,
517  const label communicator
518 )
519 {
520  label np = nProcs(communicator);
521 
522  if
523  (
524  UPstream::master(communicator)
525  && (sendSizes.size() != np || sendOffsets.size() != np)
526  )
527  {
528  FatalErrorInFunction
529  << "Size of sendSizes " << sendSizes.size()
530  << " or sendOffsets " << sendOffsets.size()
531  << " is not equal to the number of processors in the domain "
532  << np
533  << Foam::abort(FatalError);
534  }
535 
536  if (!UPstream::parRun())
537  {
538  memmove(recvData, sendData, recvSize);
539  }
540  else
541  {
542  if
543  (
544  MPI_Scatterv
545  (
546  const_cast<char*>(sendData),
547  const_cast<int*>(sendSizes.begin()),
548  const_cast<int*>(sendOffsets.begin()),
549  MPI_BYTE,
550  recvData,
551  recvSize,
552  MPI_BYTE,
553  0,
554  MPI_Comm(PstreamGlobals::MPICommunicators_[communicator])
555  )
556  )
557  {
558  FatalErrorInFunction
559  << "MPI_Scatterv failed for sendSizes " << sendSizes
560  << " sendOffsets " << sendOffsets
561  << " communicator " << communicator
562  << Foam::abort(FatalError);
563  }
564  }
565 }
566 
567 
568 void Foam::UPstream::allocatePstreamCommunicator
569 (
570  const label parentIndex,
571  const label index
572 )
573 {
574  if (index == PstreamGlobals::MPIGroups_.size())
575  {
576  // Extend storage with dummy values
577  MPI_Group newGroup = MPI_GROUP_NULL;
578  PstreamGlobals::MPIGroups_.append(newGroup);
579  MPI_Comm newComm = MPI_COMM_NULL;
580  PstreamGlobals::MPICommunicators_.append(newComm);
581  }
582  else if (index > PstreamGlobals::MPIGroups_.size())
583  {
584  FatalErrorInFunction
585  << "PstreamGlobals out of sync with UPstream data. Problem."
586  << Foam::exit(FatalError);
587  }
588 
589 
590  if (parentIndex == -1)
591  {
592  // Allocate world communicator
593 
594  if (index != UPstream::worldComm)
595  {
596  FatalErrorInFunction
597  << "world communicator should always be index "
598  << UPstream::worldComm << Foam::exit(FatalError);
599  }
600 
601  PstreamGlobals::MPICommunicators_[index] =
602  PstreamGlobals::MPI_COMM_FOAM;
603  MPI_Comm_group
604  (
605  PstreamGlobals::MPI_COMM_FOAM,
606  &PstreamGlobals::MPIGroups_[index]
607  );
608  MPI_Comm_rank
609  (
610  PstreamGlobals::MPICommunicators_[index],
611  &myProcNo_[index]
612  );
613 
614  // Set the number of processes to the actual number
615  int numProcs;
616  MPI_Comm_size(PstreamGlobals::MPICommunicators_[index], &numProcs);
617 
618  // procIDs_[index] = identityMap(numProcs);
619  procIDs_[index].setSize(numProcs);
620  forAll(procIDs_[index], i)
621  {
622  procIDs_[index][i] = i;
623  }
624  }
625  else
626  {
627  // Create new group
628  MPI_Group_incl
629  (
630  PstreamGlobals::MPIGroups_[parentIndex],
631  procIDs_[index].size(),
632  procIDs_[index].begin(),
633  &PstreamGlobals::MPIGroups_[index]
634  );
635 
636  // Create new communicator
637  MPI_Comm_create
638  (
639  PstreamGlobals::MPICommunicators_[parentIndex],
640  PstreamGlobals::MPIGroups_[index],
641  &PstreamGlobals::MPICommunicators_[index]
642  );
643 
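 // Ranks not listed in procIDs_[index] are excluded from the new group, so
 // MPI_Comm_create returns MPI_COMM_NULL on them; such ranks are flagged as
 // non-participants below.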
644  if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
645  {
646  myProcNo_[index] = -1;
647  }
648  else
649  {
650  if
651  (
652  MPI_Comm_rank
653  (
654  PstreamGlobals::MPICommunicators_[index],
655  &myProcNo_[index]
656  )
657  )
658  {
659  FatalErrorInFunction
660  << "Problem :"
661  << " when allocating communicator at " << index
662  << " from ranks " << procIDs_[index]
663  << " of parent " << parentIndex
664  << " cannot find my own rank"
665  << Foam::exit(FatalError);
666  }
667  }
668  }
669 }
670 
671 
672 void Foam::UPstream::freePstreamCommunicator(const label communicator)
673 {
674  if (communicator != UPstream::worldComm)
675  {
676  if (PstreamGlobals::MPICommunicators_[communicator] != MPI_COMM_NULL)
677  {
678  // Free communicator. Sets communicator to MPI_COMM_NULL
679  MPI_Comm_free(&PstreamGlobals::MPICommunicators_[communicator]);
680  }
681  if (PstreamGlobals::MPIGroups_[communicator] != MPI_GROUP_NULL)
682  {
683  // Free group. Sets group to MPI_GROUP_NULL
684  MPI_Group_free(&PstreamGlobals::MPIGroups_[communicator]);
685  }
686  }
687 }
688 
689 
690 Foam::label Foam::UPstream::nRequests()
691 {
692  return PstreamGlobals::outstandingRequests_.size();
693 }
694 
695 
696 void Foam::UPstream::resetRequests(const label i)
697 {
698  if (i < PstreamGlobals::outstandingRequests_.size())
699  {
700  PstreamGlobals::outstandingRequests_.setSize(i);
701  }
702 }
703 
704 
705 void Foam::UPstream::waitRequests(const label start)
706 {
707  if (debug)
708  {
709  Pout<< "UPstream::waitRequests : starting wait for "
710  << PstreamGlobals::outstandingRequests_.size()-start
711  << " outstanding requests starting at " << start << endl;
712  }
713 
714  if (PstreamGlobals::outstandingRequests_.size())
715  {
716  SubList<MPI_Request> waitRequests
717  (
718  PstreamGlobals::outstandingRequests_,
719  PstreamGlobals::outstandingRequests_.size()-start,
720  start
721  );
722 
723  if
724  (
725  MPI_Waitall
726  (
727  waitRequests.size(),
728  waitRequests.begin(),
729  MPI_STATUSES_IGNORE
730  )
731  )
732  {
733  FatalErrorInFunction
734  << "MPI_Waitall returned with error" << Foam::endl;
735  }
736 
737  resetRequests(start);
738  }
739 
740  if (debug)
741  {
742  Pout<< "UPstream::waitRequests : finished wait." << endl;
743  }
744 }
745 
746 
747 void Foam::UPstream::waitRequest(const label i)
748 {
749  if (debug)
750  {
751  Pout<< "UPstream::waitRequest : starting wait for request:" << i
752  << endl;
753  }
754 
755  if (i >= PstreamGlobals::outstandingRequests_.size())
756  {
757  FatalErrorInFunction
758  << "There are " << PstreamGlobals::outstandingRequests_.size()
759  << " outstanding send requests and you are asking for i=" << i
760  << nl
761  << "Maybe you are mixing blocking/non-blocking comms?"
762  << Foam::abort(FatalError);
763  }
764 
765  if
766  (
767  MPI_Wait
768  (
769  &PstreamGlobals::outstandingRequests_[i],
770  MPI_STATUS_IGNORE
771  )
772  )
773  {
774  FatalErrorInFunction
775  << "MPI_Wait returned with error" << Foam::endl;
776  }
777 
778  if (debug)
779  {
780  Pout<< "UPstream::waitRequest : finished wait for request:" << i
781  << endl;
782  }
783 }
784 
785 
786 bool Foam::UPstream::finishedRequest(const label i)
787 {
788  if (debug)
789  {
790  Pout<< "UPstream::finishedRequest : checking request:" << i
791  << endl;
792  }
793 
794  if (i >= PstreamGlobals::outstandingRequests_.size())
795  {
796  FatalErrorInFunction
797  << "There are " << PstreamGlobals::outstandingRequests_.size()
798  << " outstanding send requests and you are asking for i=" << i
799  << nl
800  << "Maybe you are mixing blocking/non-blocking comms?"
801  << Foam::abort(FatalError);
802  }
803 
804  int flag;
805  MPI_Test
806  (
807  &PstreamGlobals::outstandingRequests_[i],
808  &flag,
809  MPI_STATUS_IGNORE
810  );
811 
812  if (debug)
813  {
814  Pout<< "UPstream::finishedRequest : finished request:" << i
815  << endl;
816  }
817 
818  return flag != 0;
819 }
820 
821 
822 int Foam::UPstream::allocateTag(const char* s)
823 {
824  int tag;
825  if (PstreamGlobals::freedTags_.size())
826  {
827  tag = PstreamGlobals::freedTags_.remove();
828  }
829  else
830  {
831  tag = PstreamGlobals::nTags_++;
832  }
833 
834  if (debug)
835  {
836  // if (UPstream::lateBlocking > 0)
837  //{
838  // string& poutp = Pout.prefix();
839  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
840  // Perr.prefix() = Pout.prefix();
841  //}
842  Pout<< "UPstream::allocateTag " << s
843  << " : tag:" << tag
844  << endl;
845  }
846 
847  return tag;
848 }
849 
850 
851 int Foam::UPstream::allocateTag(const word& s)
852 {
853  int tag;
854  if (PstreamGlobals::freedTags_.size())
855  {
856  tag = PstreamGlobals::freedTags_.remove();
857  }
858  else
859  {
860  tag = PstreamGlobals::nTags_++;
861  }
862 
863  if (debug)
864  {
865  // if (UPstream::lateBlocking > 0)
866  //{
867  // string& poutp = Pout.prefix();
868  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
869  // Perr.prefix() = Pout.prefix();
870  //}
871  Pout<< "UPstream::allocateTag " << s
872  << " : tag:" << tag
873  << endl;
874  }
875 
876  return tag;
877 }
878 
879 
880 void Foam::UPstream::freeTag(const char* s, const int tag)
881 {
882  if (debug)
883  {
884  // if (UPstream::lateBlocking > 0)
885  //{
886  // string& poutp = Pout.prefix();
887  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = ' ';
888  // Perr.prefix() = Pout.prefix();
889  //}
890  Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
891  }
892  PstreamGlobals::freedTags_.append(tag);
893 }
894 
895 
896 void Foam::UPstream::freeTag(const word& s, const int tag)
897 {
898  if (debug)
899  {
900  // if (UPstream::lateBlocking > 0)
901  //{
902  // string& poutp = Pout.prefix();
903  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = ' ';
904  // Perr.prefix() = Pout.prefix();
905  //}
906  Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
907  }
908  PstreamGlobals::freedTags_.append(tag);
909 }
910 
911 
912 // ************************************************************************* //