UPstream.C
1 /*---------------------------------------------------------------------------*\
2   =========                 |
3   \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
4    \\    /   O peration     | Website:  https://openfoam.org
5     \\  /    A nd           | Copyright (C) 2011-2018 OpenFOAM Foundation
6      \\/     M anipulation  |
7 -------------------------------------------------------------------------------
8 License
9  This file is part of OpenFOAM.
10 
11  OpenFOAM is free software: you can redistribute it and/or modify it
12  under the terms of the GNU General Public License as published by
13  the Free Software Foundation, either version 3 of the License, or
14  (at your option) any later version.
15 
16  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19  for more details.
20 
21  You should have received a copy of the GNU General Public License
22  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 
24 Note
25  The const_cast used in this file is a temporary hack to work around
26  bugs in OpenMPI for versions < 1.7.4
27 
28 \*---------------------------------------------------------------------------*/
29 
30 #include "UPstream.H"
31 #include "PstreamReduceOps.H"
32 #include "OSspecific.H"
33 #include "PstreamGlobals.H"
34 #include "SubList.H"
35 #include "allReduce.H"
36 
37 #include <mpi.h>
38 
39 #include <cstring>
40 #include <cstdlib>
41 #include <csignal>
42 
43 #if defined(WM_SP)
44  #define MPI_SCALAR MPI_FLOAT
45 #elif defined(WM_DP)
46  #define MPI_SCALAR MPI_DOUBLE
47 #elif defined(WM_LP)
48  #define MPI_SCALAR MPI_LONG_DOUBLE
49 #endif
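
// Note (descriptive, added for clarity): WM_SP, WM_DP and WM_LP select the
// single-, double- and long-double-precision builds of OpenFOAM; MPI_SCALAR is
// the matching MPI datatype used by the reduce/allReduce wrappers below.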
50 
51 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
52 
53 // NOTE:
54 // valid parallel options vary between implementations, but flag common ones.
55 // if they are not removed by MPI_Init(), the subsequent argument processing
56 // will notice that they are wrong
57 void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
58 {
59  validParOptions.insert("np", "");
60  validParOptions.insert("p4pg", "PI file");
61  validParOptions.insert("p4wd", "directory");
62  validParOptions.insert("p4amslave", "");
63  validParOptions.insert("p4yourname", "hostname");
64  validParOptions.insert("machinefile", "machine file");
65 }
66 
67 
68 bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
69 {
70  // MPI_Init(&argc, &argv);
71  int provided_thread_support;
72  MPI_Init_thread
73  (
74  &argc,
75  &argv,
76  (
77  needsThread
78  ? MPI_THREAD_MULTIPLE
79  : MPI_THREAD_SINGLE
80  ),
81  &provided_thread_support
82  );
83 
84  // int numprocs;
85  // MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
86  // int myRank;
87  // MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
88 
89  int myGlobalRank;
90  MPI_Comm_rank(MPI_COMM_WORLD, &myGlobalRank);
91  MPI_Comm_split
92  (
93  MPI_COMM_WORLD,
94  1,
95  myGlobalRank,
96  &(PstreamGlobals::MPI_COMM_FOAM)
97  );
98 
99  int numprocs;
100  MPI_Comm_size(PstreamGlobals::MPI_COMM_FOAM, &numprocs);
101  int myRank;
102  MPI_Comm_rank(PstreamGlobals::MPI_COMM_FOAM, &myRank);
103 
104  if (debug)
105  {
106  Pout<< "UPstream::init : initialised with numProcs:" << numprocs
107  << " myRank:" << myRank << endl;
108  }
109 
110  if (numprocs <= 1)
111  {
112  FatalErrorInFunction
113  << "bool IPstream::init(int& argc, char**& argv) : "
114  "attempt to run parallel on 1 processor"
115  << Foam::abort(FatalError);
116  }
117 
118 
119  // Initialise parallel structure
120  setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);
121 
122  #ifndef SGIMPI
123  string bufferSizeName = getEnv("MPI_BUFFER_SIZE");
124 
125  if (bufferSizeName.size())
126  {
127  int bufferSize = atoi(bufferSizeName.c_str());
128 
129  if (bufferSize)
130  {
131  MPI_Buffer_attach(new char[bufferSize], bufferSize);
132  }
133  }
134  else
135  {
136  FatalErrorInFunction
137  << "UPstream::init(int& argc, char**& argv) : "
138  << "environment variable MPI_BUFFER_SIZE not defined"
139  << Foam::abort(FatalError);
140  }
141  #endif
142 
143  // int processorNameLen;
144  // char processorName[MPI_MAX_PROCESSOR_NAME];
145  //
146  // MPI_Get_processor_name(processorName, &processorNameLen);
147  // processorName[processorNameLen] = '\0';
148  // Pout<< "Processor name:" << processorName << endl;
149 
150  return true;
151 }
152 
153 
154 void Foam::UPstream::exit(int errnum)
155 {
156  if (debug)
157  {
158  Pout<< "UPstream::exit." << endl;
159  }
160 
161  #ifndef SGIMPI
162  int size;
163  char* buff;
164  MPI_Buffer_detach(&buff, &size);
165  delete[] buff;
166  #endif
167 
168  if (PstreamGlobals::outstandingRequests_.size())
169  {
170  label n = PstreamGlobals::outstandingRequests_.size();
171  PstreamGlobals::outstandingRequests_.clear();
172 
173  WarningInFunction
174  << "There are still " << n << " outstanding MPI_Requests." << endl
175  << "This means that your code exited before doing a"
176  << " UPstream::waitRequests()." << endl
177  << "This should not happen for a normal code exit."
178  << endl;
179  }
180 
181  // Clean mpi communicators
182  forAll(myProcNo_, communicator)
183  {
184  if (myProcNo_[communicator] != -1)
185  {
186  freePstreamCommunicator(communicator);
187  }
188  }
189 
190  if (errnum == 0)
191  {
192  MPI_Finalize();
193  ::exit(errnum);
194  }
195  else
196  {
197  MPI_Abort(PstreamGlobals::MPI_COMM_FOAM, errnum);
198  }
199 }
200 
201 
203 {
204  MPI_Abort(PstreamGlobals::MPI_COMM_FOAM, 1);
205 }
206 
207 
208 void Foam::reduce
209 (
210  scalar& Value,
211  const sumOp<scalar>& bop,
212  const int tag,
213  const label communicator
214 )
215 {
216  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
217  {
218  Pout<< "** reducing:" << Value << " with comm:" << communicator
219  << " warnComm:" << UPstream::warnComm
220  << endl;
221  error::printStack(Pout);
222  }
223  allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
224 }
225 
226 
227 void Foam::reduce
228 (
229  scalar& Value,
230  const minOp<scalar>& bop,
231  const int tag,
232  const label communicator
233 )
234 {
235  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
236  {
237  Pout<< "** reducing:" << Value << " with comm:" << communicator
238  << " warnComm:" << UPstream::warnComm
239  << endl;
240  error::printStack(Pout);
241  }
242  allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, communicator);
243 }
244 
245 
246 void Foam::reduce
247 (
248  vector2D& Value,
249  const sumOp<vector2D>& bop,
250  const int tag,
251  const label communicator
252 )
253 {
254  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
255  {
256  Pout<< "** reducing:" << Value << " with comm:" << communicator
257  << " warnComm:" << UPstream::warnComm
258  << endl;
259  error::printStack(Pout);
260  }
261  allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
262 }
263 
264 
265 void Foam::sumReduce
266 (
267  scalar& Value,
268  label& Count,
269  const int tag,
270  const label communicator
271 )
272 {
273  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
274  {
275  Pout<< "** reducing:" << Value << " with comm:" << communicator
276  << " warnComm:" << UPstream::warnComm
277  << endl;
278  error::printStack(Pout);
279  }
280  vector2D twoScalars(Value, scalar(Count));
281  reduce(twoScalars, sumOp<vector2D>(), tag, communicator);
282 
283  Value = twoScalars.x();
284  Count = twoScalars.y();
285 }
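
// Usage sketch (illustrative): a global sum of a local scalar with the
// explicit-argument overloads defined above; the tag and communicator shown
// are the usual defaults.
//
//     scalar v = localContribution;
//     Foam::reduce(v, sumOp<scalar>(), UPstream::msgType(), UPstream::worldComm);
//     // v now holds the sum over all ranks of the communicator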
286 
287 
288 void Foam::reduce
289 (
290  scalar& Value,
291  const sumOp<scalar>& bop,
292  const int tag,
293  const label communicator,
294  label& requestID
295 )
296 {
297 #ifdef MPIX_COMM_TYPE_SHARED
298  // Assume mpich2 with non-blocking collectives extensions. Once mpi3
299  // is available this will change.
300  MPI_Request request;
301  scalar v = Value;
302  MPIX_Ireduce
303  (
304  &v,
305  &Value,
306  1,
307  MPI_SCALAR,
308  MPI_SUM,
309  0, // root
310  PstreamGlobals::MPICommunicators_[communicator],
311  &request
312  );
313 
314  requestID = PstreamGlobals::outstandingRequests_.size();
315  PstreamGlobals::outstandingRequests_.append(request);
316 
317  if (UPstream::debug)
318  {
319  Pout<< "UPstream::allocateRequest for non-blocking reduce"
320  << " : request:" << requestID
321  << endl;
322  }
323 #else
324  // Non-blocking not yet implemented in mpi
325  reduce(Value, bop, tag, communicator);
326  requestID = -1;
327 #endif
328 }
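
// Note (descriptive, added for clarity): when the non-blocking MPIX path above
// is taken, requestID indexes PstreamGlobals::outstandingRequests_ and can
// later be passed to UPstream::waitRequest(requestID). With the blocking
// fallback, requestID is -1 and the reduction has already completed.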
329 
330 
331 void Foam::UPstream::allToAll
332 (
333  const labelUList& sendData,
334  labelUList& recvData,
335  const label communicator
336 )
337 {
338  label np = nProcs(communicator);
339 
340  if (sendData.size() != np || recvData.size() != np)
341  {
342  FatalErrorInFunction
343  << "Size of sendData " << sendData.size()
344  << " or size of recvData " << recvData.size()
345  << " is not equal to the number of processors in the domain "
346  << np
347  << Foam::abort(FatalError);
348  }
349 
350  if (!UPstream::parRun())
351  {
352  recvData.deepCopy(sendData);
353  }
354  else
355  {
356  if
357  (
358  MPI_Alltoall
359  (
360  const_cast<label*>(sendData.begin()),
361  sizeof(label),
362  MPI_BYTE,
363  recvData.begin(),
364  sizeof(label),
365  MPI_BYTE,
366  PstreamGlobals::MPICommunicators_[communicator]
367  )
368  )
369  {
370  FatalErrorInFunction
371  << "MPI_Alltoall failed for " << sendData
372  << " on communicator " << communicator
373  << Foam::abort(FatalError);
374  }
375  }
376 }
377 
378 
379 void Foam::UPstream::allToAll
380 (
381  const char* sendData,
382  const UList<int>& sendSizes,
383  const UList<int>& sendOffsets,
384 
385  char* recvData,
386  const UList<int>& recvSizes,
387  const UList<int>& recvOffsets,
388 
389  const label communicator
390 )
391 {
392  label np = nProcs(communicator);
393 
394  if
395  (
396  sendSizes.size() != np
397  || sendOffsets.size() != np
398  || recvSizes.size() != np
399  || recvOffsets.size() != np
400  )
401  {
402  FatalErrorInFunction
403  << "Size of sendSize " << sendSizes.size()
404  << ", sendOffsets " << sendOffsets.size()
405  << ", recvSizes " << recvSizes.size()
406  << " or recvOffsets " << recvOffsets.size()
407  << " is not equal to the number of processors in the domain "
408  << np
409  << Foam::abort(FatalError);
410  }
411 
412  if (!UPstream::parRun())
413  {
414  if (recvSizes[0] != sendSizes[0])
415  {
416  FatalErrorInFunction
417  << "Bytes to send " << sendSizes[0]
418  << " does not equal bytes to receive " << recvSizes[0]
419  << Foam::abort(FatalError);
420  }
421  memmove(recvData, &sendData[sendOffsets[0]], recvSizes[0]);
422  }
423  else
424  {
425  if
426  (
427  MPI_Alltoallv
428  (
429  const_cast<char*>(sendData),
430  const_cast<int*>(sendSizes.begin()),
431  const_cast<int*>(sendOffsets.begin()),
432  MPI_BYTE,
433  recvData,
434  const_cast<int*>(recvSizes.begin()),
435  const_cast<int*>(recvOffsets.begin()),
436  MPI_BYTE,
437  PstreamGlobals::MPICommunicators_[communicator]
438  )
439  )
440  {
441  FatalErrorInFunction
442  << "MPI_Alltoallv failed for sendSizes " << sendSizes
443  << " recvSizes " << recvSizes
444  << " communicator " << communicator
445  << Foam::abort(FatalError);
446  }
447  }
448 }
449 
450 
451 void Foam::UPstream::gather
452 (
453  const char* sendData,
454  int sendSize,
455 
456  char* recvData,
457  const UList<int>& recvSizes,
458  const UList<int>& recvOffsets,
459  const label communicator
460 )
461 {
462  label np = nProcs(communicator);
463 
464  if
465  (
466  UPstream::master(communicator)
467  && (recvSizes.size() != np || recvOffsets.size() < np)
468  )
469  {
470  // Note: allow recvOffsets to be e.g. 1 larger than np so we
471  // can easily loop over the result
472 
473  FatalErrorInFunction
474  << "Size of recvSizes " << recvSizes.size()
475  << " or recvOffsets " << recvOffsets.size()
476  << " is not equal to the number of processors in the domain "
477  << np
478  << Foam::abort(FatalError);
479  }
480 
481  if (!UPstream::parRun())
482  {
483  memmove(recvData, sendData, sendSize);
484  }
485  else
486  {
487  if
488  (
489  MPI_Gatherv
490  (
491  const_cast<char*>(sendData),
492  sendSize,
493  MPI_BYTE,
494  recvData,
495  const_cast<int*>(recvSizes.begin()),
496  const_cast<int*>(recvOffsets.begin()),
497  MPI_BYTE,
498  0,
499  MPI_Comm(PstreamGlobals::MPICommunicators_[communicator])
500  )
501  )
502  {
503  FatalErrorInFunction
504  << "MPI_Gatherv failed for sendSize " << sendSize
505  << " recvSizes " << recvSizes
506  << " communicator " << communicator
507  << Foam::abort(FatalError);
508  }
509  }
510 }
511 
512 
513 void Foam::UPstream::scatter
514 (
515  const char* sendData,
516  const UList<int>& sendSizes,
517  const UList<int>& sendOffsets,
518 
519  char* recvData,
520  int recvSize,
521  const label communicator
522 )
523 {
524  label np = nProcs(communicator);
525 
526  if
527  (
528  UPstream::master(communicator)
529  && (sendSizes.size() != np || sendOffsets.size() != np)
530  )
531  {
532  FatalErrorInFunction
533  << "Size of sendSizes " << sendSizes.size()
534  << " or sendOffsets " << sendOffsets.size()
535  << " is not equal to the number of processors in the domain "
536  << np
537  << Foam::abort(FatalError);
538  }
539 
540  if (!UPstream::parRun())
541  {
542  memmove(recvData, sendData, recvSize);
543  }
544  else
545  {
546  if
547  (
548  MPI_Scatterv
549  (
550  const_cast<char*>(sendData),
551  const_cast<int*>(sendSizes.begin()),
552  const_cast<int*>(sendOffsets.begin()),
553  MPI_BYTE,
554  recvData,
555  recvSize,
556  MPI_BYTE,
557  0,
558  MPI_Comm(PstreamGlobals::MPICommunicators_[communicator])
559  )
560  )
561  {
562  FatalErrorInFunction
563  << "MPI_Scatterv failed for sendSizes " << sendSizes
564  << " sendOffsets " << sendOffsets
565  << " communicator " << communicator
566  << Foam::abort(FatalError);
567  }
568  }
569 }
570 
571 
572 void Foam::UPstream::allocatePstreamCommunicator
573 (
574  const label parentIndex,
575  const label index
576 )
577 {
578  if (index == PstreamGlobals::MPIGroups_.size())
579  {
580  // Extend storage with dummy values
581  MPI_Group newGroup = MPI_GROUP_NULL;
582  PstreamGlobals::MPIGroups_.append(newGroup);
583  MPI_Comm newComm = MPI_COMM_NULL;
584  PstreamGlobals::MPICommunicators_.append(newComm);
585  }
586  else if (index > PstreamGlobals::MPIGroups_.size())
587  {
588  FatalErrorInFunction
589  << "PstreamGlobals out of sync with UPstream data. Problem."
590  << Foam::exit(FatalError);
591  }
592 
593 
594  if (parentIndex == -1)
595  {
596  // Allocate world communicator
597 
598  if (index != UPstream::worldComm)
599  {
600  FatalErrorInFunction
601  << "world communicator should always be index "
602  << UPstream::worldComm << Foam::exit(FatalError);
603  }
604 
605  PstreamGlobals::MPICommunicators_[index] =
606  PstreamGlobals::MPI_COMM_FOAM;
607  MPI_Comm_group
608  (
609  PstreamGlobals::MPI_COMM_FOAM,
610  &PstreamGlobals::MPIGroups_[index]
611  );
612  MPI_Comm_rank
613  (
614  PstreamGlobals::MPICommunicators_[index],
615  &myProcNo_[index]
616  );
617 
618  // Set the number of processes to the actual number
619  int numProcs;
620  MPI_Comm_size(PstreamGlobals::MPICommunicators_[index], &numProcs);
621 
622  // procIDs_[index] = identity(numProcs);
623  procIDs_[index].setSize(numProcs);
624  forAll(procIDs_[index], i)
625  {
626  procIDs_[index][i] = i;
627  }
628  }
629  else
630  {
631  // Create new group
632  MPI_Group_incl
633  (
634  PstreamGlobals::MPIGroups_[parentIndex],
635  procIDs_[index].size(),
636  procIDs_[index].begin(),
637  &PstreamGlobals::MPIGroups_[index]
638  );
639 
640  // Create new communicator
641  MPI_Comm_create
642  (
643  PstreamGlobals::MPICommunicators_[parentIndex],
644  PstreamGlobals::MPIGroups_[index],
645  &PstreamGlobals::MPICommunicators_[index]
646  );
647 
648  if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
649  {
650  myProcNo_[index] = -1;
651  }
652  else
653  {
654  if
655  (
656  MPI_Comm_rank
657  (
658  PstreamGlobals::MPICommunicators_[index],
659  &myProcNo_[index]
660  )
661  )
662  {
663  FatalErrorInFunction
664  << "Problem :"
665  << " when allocating communicator at " << index
666  << " from ranks " << procIDs_[index]
667  << " of parent " << parentIndex
668  << " cannot find my own rank"
669  << Foam::exit(FatalError);
670  }
671  }
672  }
673 }
674 
675 
676 void Foam::UPstream::freePstreamCommunicator(const label communicator)
677 {
678  if (communicator != UPstream::worldComm)
679  {
680  if (PstreamGlobals::MPICommunicators_[communicator] != MPI_COMM_NULL)
681  {
682  // Free communicator. Sets communicator to MPI_COMM_NULL
683  MPI_Comm_free(&PstreamGlobals::MPICommunicators_[communicator]);
684  }
685  if (PstreamGlobals::MPIGroups_[communicator] != MPI_GROUP_NULL)
686  {
687  // Free group. Sets group to MPI_GROUP_NULL
688  MPI_Group_free(&PstreamGlobals::MPIGroups_[communicator]);
689  }
690  }
691 }
692 
693 
694 Foam::label Foam::UPstream::nRequests()
695 {
696  return PstreamGlobals::outstandingRequests_.size();
697 }
698 
699 
700 void Foam::UPstream::resetRequests(const label i)
701 {
702  if (i < PstreamGlobals::outstandingRequests_.size())
703  {
704  PstreamGlobals::outstandingRequests_.setSize(i);
705  }
706 }
707 
708 
709 void Foam::UPstream::waitRequests(const label start)
710 {
711  if (debug)
712  {
713  Pout<< "UPstream::waitRequests : starting wait for "
714  << PstreamGlobals::outstandingRequests_.size()-start
715  << " outstanding requests starting at " << start << endl;
716  }
717 
718  if (PstreamGlobals::outstandingRequests_.size())
719  {
720  SubList<MPI_Request> waitRequests
721  (
722  PstreamGlobals::outstandingRequests_,
723  PstreamGlobals::outstandingRequests_.size() - start,
724  start
725  );
726 
727  if
728  (
729  MPI_Waitall
730  (
731  waitRequests.size(),
732  waitRequests.begin(),
733  MPI_STATUSES_IGNORE
734  )
735  )
736  {
737  FatalErrorInFunction
738  << "MPI_Waitall returned with error" << Foam::endl;
739  }
740 
741  resetRequests(start);
742  }
743 
744  if (debug)
745  {
746  Pout<< "UPstream::waitRequests : finished wait." << endl;
747  }
748 }
749 
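// Usage sketch (illustrative): callers typically bracket a group of
// non-blocking transfers with the request bookkeeping above, e.g.
//
//     label nReqsBefore = UPstream::nRequests();
//     ... start non-blocking sends/receives ...
//     UPstream::waitRequests(nReqsBefore);   // wait only for the requests above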
750 
751 void Foam::UPstream::waitRequest(const label i)
752 {
753  if (debug)
754  {
755  Pout<< "UPstream::waitRequest : starting wait for request:" << i
756  << endl;
757  }
758 
759  if (i >= PstreamGlobals::outstandingRequests_.size())
760  {
761  FatalErrorInFunction
762  << "There are " << PstreamGlobals::outstandingRequests_.size()
763  << " outstanding send requests and you are asking for i=" << i
764  << nl
765  << "Maybe you are mixing blocking/non-blocking comms?"
766  << Foam::abort(FatalError);
767  }
768 
769  if
770  (
771  MPI_Wait
772  (
773  &PstreamGlobals::outstandingRequests_[i],
774  MPI_STATUS_IGNORE
775  )
776  )
777  {
778  FatalErrorInFunction
779  << "MPI_Wait returned with error" << Foam::endl;
780  }
781 
782  if (debug)
783  {
784  Pout<< "UPstream::waitRequest : finished wait for request:" << i
785  << endl;
786  }
787 }
788 
789 
790 bool Foam::UPstream::finishedRequest(const label i)
791 {
792  if (debug)
793  {
794  Pout<< "UPstream::finishedRequest : checking request:" << i
795  << endl;
796  }
797 
798  if (i >= PstreamGlobals::outstandingRequests_.size())
799  {
800  FatalErrorInFunction
801  << "There are " << PstreamGlobals::outstandingRequests_.size()
802  << " outstanding send requests and you are asking for i=" << i
803  << nl
804  << "Maybe you are mixing blocking/non-blocking comms?"
805  << Foam::abort(FatalError);
806  }
807 
808  int flag;
809  MPI_Test
810  (
811  &PstreamGlobals::outstandingRequests_[i],
812  &flag,
813  MPI_STATUS_IGNORE
814  );
815 
816  if (debug)
817  {
818  Pout<< "UPstream::finishedRequest : finished request:" << i
819  << endl;
820  }
821 
822  return flag != 0;
823 }
824 
825 
826 int Foam::UPstream::allocateTag(const char* s)
827 {
828  int tag;
829  if (PstreamGlobals::freedTags_.size())
830  {
831  tag = PstreamGlobals::freedTags_.remove();
832  }
833  else
834  {
835  tag = PstreamGlobals::nTags_++;
836  }
837 
838  if (debug)
839  {
840  // if (UPstream::lateBlocking > 0)
841  //{
842  // string& poutp = Pout.prefix();
843  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
844  // Perr.prefix() = Pout.prefix();
845  //}
846  Pout<< "UPstream::allocateTag " << s
847  << " : tag:" << tag
848  << endl;
849  }
850 
851  return tag;
852 }
853 
854 
855 int Foam::UPstream::allocateTag(const word& s)
856 {
857  int tag;
858  if (PstreamGlobals::freedTags_.size())
859  {
860  tag = PstreamGlobals::freedTags_.remove();
861  }
862  else
863  {
864  tag = PstreamGlobals::nTags_++;
865  }
866 
867  if (debug)
868  {
869  // if (UPstream::lateBlocking > 0)
870  //{
871  // string& poutp = Pout.prefix();
872  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
873  // Perr.prefix() = Pout.prefix();
874  //}
875  Pout<< "UPstream::allocateTag " << s
876  << " : tag:" << tag
877  << endl;
878  }
879 
880  return tag;
881 }
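
// Usage sketch (illustrative; the name "someExchange" is only an example): a
// tag obtained here is expected to be handed back via freeTag once the
// associated communication has completed, e.g.
//
//     const int uniqueTag = UPstream::allocateTag("someExchange");
//     ... point-to-point transfers using uniqueTag ...
//     UPstream::freeTag("someExchange", uniqueTag);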
882 
883 
884 void Foam::UPstream::freeTag(const char* s, const int tag)
885 {
886  if (debug)
887  {
888  // if (UPstream::lateBlocking > 0)
889  //{
890  // string& poutp = Pout.prefix();
891  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = ' ';
892  // Perr.prefix() = Pout.prefix();
893  //}
894  Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
895  }
896  PstreamGlobals::freedTags_.append(tag);
897 }
898 
899 
900 void Foam::UPstream::freeTag(const word& s, const int tag)
901 {
902  if (debug)
903  {
904  // if (UPstream::lateBlocking > 0)
905  //{
906  // string& poutp = Pout.prefix();
907  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = ' ';
908  // Perr.prefix() = Pout.prefix();
909  //}
910  Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
911  }
912  PstreamGlobals::freedTags_.append(tag);
913 }
914 
915 
916 // ************************************************************************* //