// MPI datatype corresponding to the OpenFOAM scalar precision
#if defined(WM_SP)
    #define MPI_SCALAR MPI_FLOAT
#elif defined(WM_DP)
    #define MPI_SCALAR MPI_DOUBLE
#else
    #define MPI_SCALAR MPI_LONG_DOUBLE
#endif


void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("machinefile", "machine file");
}
bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
{
    int provided_thread_support;
    MPI_Init_thread
    (
        &argc,
        &argv,
        (needsThread ? MPI_THREAD_MULTIPLE : MPI_THREAD_SINGLE),
        &provided_thread_support
    );

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    int myGlobalRank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myGlobalRank);

    if (debug)
    {
        Pout<< "UPstream::init : initialised with numProcs:" << numprocs
            << " myRank:" << myGlobalRank << endl;
    }

    if (numprocs <= 1)
    {
        FatalErrorInFunction
            << "bool IPstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    // Initialise parallel structure
    setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);

    // Attach a buffer for MPI_Bsend; its size comes from the environment
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorInFunction
            << "UPstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }

    return true;
}


void Foam::UPstream::exit(int errnum)
{
    // Detach (and delete) the buffer attached in init()
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);
    delete[] buff;

    if (PstreamGlobals::outstandingRequests_.size())
    {
        label n = PstreamGlobals::outstandingRequests_.size();
        PstreamGlobals::outstandingRequests_.clear();

        WarningInFunction
            << "There are still " << n << " outstanding MPI_Requests." << endl
            << "This means that your code exited before doing a"
            << " UPstream::waitRequests()." << endl
            << "This should not happen for a normal code exit."
            << endl;
    }

    // Clean mpi communicators
    forAll(myProcNo_, communicator)
    {
        if (myProcNo_[communicator] != -1)
        {
            freePstreamCommunicator(communicator);
        }
    }

    // ... (MPI_Finalize, or MPI_Abort for a non-zero errnum)
}
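// --- Illustrative sketch (not part of UPstream.C) --------------------------
// A minimal standalone example of the initialisation pattern above: request
// a thread support level with MPI_Init_thread, check what was actually
// provided, and attach a buffer so MPI_Bsend has copy space. The function
// name exampleMpiInit and the 64 MB fallback size are hypothetical choices.
#include <mpi.h>
#include <cstdlib>

bool exampleMpiInit(int& argc, char**& argv, bool needsThread)
{
    int provided = 0;
    MPI_Init_thread
    (
        &argc,
        &argv,
        needsThread ? MPI_THREAD_MULTIPLE : MPI_THREAD_SINGLE,
        &provided
    );

    // The provided level may be lower than requested; callers relying on
    // MPI_THREAD_MULTIPLE must check it explicitly.
    const bool haveThreads = (provided == MPI_THREAD_MULTIPLE);

    // Attach a buffer for buffered sends, sized from the environment.
    const char* sizeEnv = std::getenv("MPI_BUFFER_SIZE");
    const int bufferSize = sizeEnv ? std::atoi(sizeEnv) : 64*1024*1024;
    if (bufferSize > 0)
    {
        MPI_Buffer_attach(new char[bufferSize], bufferSize);
    }

    return haveThreads;
}
// ----------------------------------------------------------------------------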
void Foam::reduce
(
    scalar& Value,
    const sumOp<scalar>& bop,
    const int tag,
    const label communicator
)
{
    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
    {
        Pout<< "** reducing:" << Value << " with comm:" << communicator
            << endl;
        error::printStack(Pout);
    }

    allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
}
void Foam::reduce
(
    scalar& Value,
    const minOp<scalar>& bop,
    const int tag,
    const label communicator
)
{
    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
    {
        Pout<< "** reducing:" << Value << " with comm:" << communicator
            << endl;
        error::printStack(Pout);
    }

    allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, communicator);
}
void Foam::reduce
(
    vector2D& Value,
    const sumOp<vector2D>& bop,
    const int tag,
    const label communicator
)
{
    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
    {
        Pout<< "** reducing:" << Value << " with comm:" << communicator
            << endl;
        error::printStack(Pout);
    }

    allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
}
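// --- Illustrative sketch (not part of UPstream.C) --------------------------
// The reduce() overloads above all funnel into allReduce(), which is in
// essence a checked MPI_Allreduce. A minimal standalone version of that
// pattern, assuming double-precision scalars; exampleAllreduceSum is a
// hypothetical name.
#include <mpi.h>

double exampleAllreduceSum(double value, MPI_Comm comm)
{
    double result = 0.0;
    // Every rank contributes 'value'; every rank receives the global sum.
    MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
    return result;
}
// ----------------------------------------------------------------------------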
void Foam::sumReduce
(
    scalar& Value,
    label& Count,
    const int tag,
    const label communicator
)
{
    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
    {
        Pout<< "** reducing:" << Value << " with comm:" << communicator
            << endl;
        error::printStack(Pout);
    }

    // Pack the value and the count so a single reduction handles both
    vector2D twoScalars(Value, scalar(Count));
    reduce(twoScalars, sumOp<vector2D>(), tag, communicator);

    Value = twoScalars.x();
    Count = twoScalars.y();
}
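// --- Illustrative sketch (not part of UPstream.C) --------------------------
// sumReduce() packs the value and the count into one vector2D so that a
// single reduction replaces two. The same idea with a plain two-element
// array; exampleSumAndCount is a hypothetical name.
#include <mpi.h>

void exampleSumAndCount(double& value, long& count, MPI_Comm comm)
{
    double sendBuf[2] = {value, static_cast<double>(count)};
    double recvBuf[2] = {0.0, 0.0};

    // One MPI_Allreduce sums both components at once.
    MPI_Allreduce(sendBuf, recvBuf, 2, MPI_DOUBLE, MPI_SUM, comm);

    value = recvBuf[0];
    count = static_cast<long>(recvBuf[1]);
}
// ----------------------------------------------------------------------------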
void Foam::reduce
(
    scalar& Value,
    const sumOp<scalar>& bop,
    const int tag,
    const label communicator,
    label& requestID
)
{
#ifdef MPIX_COMM_TYPE_SHARED
    // Non-blocking collectives available: start the reduction, append the
    // request to PstreamGlobals::outstandingRequests_ and return its index
    // ... (MPIX_Iallreduce call elided)

    if (UPstream::debug)
    {
        Pout<< "UPstream::allocateRequest for non-blocking reduce"
            << " : request:" << requestID
            << endl;
    }
#else
    // Non-blocking reduce not available: fall back to the blocking form
    reduce(Value, bop, tag, communicator);
    requestID = -1;
#endif
}
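// --- Illustrative sketch (not part of UPstream.C) --------------------------
// The overload above only starts a genuinely non-blocking reduction when the
// MPIX extensions are present, otherwise it falls back to the blocking
// reduce. With an MPI-3 library the standard call is MPI_Iallreduce; a
// hypothetical standalone sketch:
#include <mpi.h>

MPI_Request exampleIallreduceSum(const double* in, double* out, MPI_Comm comm)
{
    MPI_Request request;
    // Starts the reduction; it only completes after MPI_Wait/MPI_Test.
    MPI_Iallreduce(in, out, 1, MPI_DOUBLE, MPI_SUM, comm, &request);
    return request;
}

// Usage: start the reduction, overlap other work, then wait:
//   MPI_Request r = exampleIallreduceSum(&local, &global, MPI_COMM_WORLD);
//   /* ... other work ... */
//   MPI_Wait(&r, MPI_STATUS_IGNORE);
// ----------------------------------------------------------------------------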
void Foam::UPstream::allToAll
(
    const labelUList& sendData,
    labelUList& recvData,
    const label communicator
)
{
    label np = nProcs(communicator);

    if (sendData.size() != np || recvData.size() != np)
    {
        FatalErrorInFunction
            << "Size of sendData " << sendData.size()
            << " or size of recvData " << recvData.size()
            << " is not equal to the number of processors in the domain "
            << np << Foam::abort(FatalError);
    }

    if (!UPstream::parRun())
    {
        recvData.deepCopy(sendData);
    }
    else
    {
        if
        (
            MPI_Alltoall
            (
                const_cast<label*>(sendData.begin()),
                sizeof(label),
                MPI_BYTE,
                recvData.begin(),
                sizeof(label),
                MPI_BYTE,
                PstreamGlobals::MPICommunicators_[communicator]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Alltoall failed for " << sendData
                << " on communicator " << communicator
                << Foam::abort(FatalError);
        }
    }
}
void Foam::UPstream::allToAll
(
    const char* sendData,
    const UList<int>& sendSizes,
    const UList<int>& sendOffsets,

    char* recvData,
    const UList<int>& recvSizes,
    const UList<int>& recvOffsets,

    const label communicator
)
{
    label np = nProcs(communicator);

    if
    (
        sendSizes.size() != np
     || sendOffsets.size() != np
     || recvSizes.size() != np
     || recvOffsets.size() != np
    )
    {
        FatalErrorInFunction
            << "Size of sendSize " << sendSizes.size()
            << ", sendOffsets " << sendOffsets.size()
            << ", recvSizes " << recvSizes.size()
            << " or recvOffsets " << recvOffsets.size()
            << " is not equal to the number of processors in the domain "
            << np << Foam::abort(FatalError);
    }

    if (!UPstream::parRun())
    {
        if (recvSizes[0] != sendSizes[0])
        {
            FatalErrorInFunction
                << "Bytes to send " << sendSizes[0]
                << " does not equal bytes to receive " << recvSizes[0]
                << Foam::abort(FatalError);
        }
        memmove(recvData, &sendData[sendOffsets[0]], recvSizes[0]);
    }
    else
    {
        if
        (
            MPI_Alltoallv
            (
                const_cast<char*>(sendData),
                const_cast<int*>(sendSizes.begin()),
                const_cast<int*>(sendOffsets.begin()),
                MPI_BYTE,
                recvData,
                const_cast<int*>(recvSizes.begin()),
                const_cast<int*>(recvOffsets.begin()),
                MPI_BYTE,
                PstreamGlobals::MPICommunicators_[communicator]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Alltoallv failed for sendSizes " << sendSizes
                << " recvSizes " << recvSizes
                << " communicator " << communicator
                << Foam::abort(FatalError);
        }
    }
}
void Foam::UPstream::gather
(
    const char* sendData,
    int sendSize,

    char* recvData,
    const UList<int>& recvSizes,
    const UList<int>& recvOffsets,
    const label communicator
)
{
    label np = nProcs(communicator);

    if
    (
        UPstream::master(communicator)
     && (recvSizes.size() != np || recvOffsets.size() < np)
    )
    {
        FatalErrorInFunction
            << "Size of recvSizes " << recvSizes.size()
            << " or recvOffsets " << recvOffsets.size()
            << " is not equal to the number of processors in the domain "
            << np << Foam::abort(FatalError);
    }

    if (!UPstream::parRun())
    {
        memmove(recvData, sendData, sendSize);
    }
    else
    {
        if
        (
            MPI_Gatherv
            (
                const_cast<char*>(sendData),
                sendSize,
                MPI_BYTE,
                recvData,
                const_cast<int*>(recvSizes.begin()),
                const_cast<int*>(recvOffsets.begin()),
                MPI_BYTE,
                0,
                PstreamGlobals::MPICommunicators_[communicator]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Gatherv failed for sendSize " << sendSize
                << " recvSizes " << recvSizes
                << " communicator " << communicator
                << Foam::abort(FatalError);
        }
    }
}
void Foam::UPstream::scatter
(
    const char* sendData,
    const UList<int>& sendSizes,
    const UList<int>& sendOffsets,

    char* recvData,
    int recvSize,
    const label communicator
)
{
    label np = nProcs(communicator);

    if
    (
        UPstream::master(communicator)
     && (sendSizes.size() != np || sendOffsets.size() != np)
    )
    {
        FatalErrorInFunction
            << "Size of sendSizes " << sendSizes.size()
            << " or sendOffsets " << sendOffsets.size()
            << " is not equal to the number of processors in the domain "
            << np << Foam::abort(FatalError);
    }

    if (!UPstream::parRun())
    {
        memmove(recvData, sendData, recvSize);
    }
    else
    {
        if
        (
            MPI_Scatterv
            (
                const_cast<char*>(sendData),
                const_cast<int*>(sendSizes.begin()),
                const_cast<int*>(sendOffsets.begin()),
                MPI_BYTE,
                recvData,
                recvSize,
                MPI_BYTE,
                0,
                PstreamGlobals::MPICommunicators_[communicator]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Scatterv failed for sendSizes " << sendSizes
                << " sendOffsets " << sendOffsets
                << " communicator " << communicator
                << Foam::abort(FatalError);
        }
    }
}
void Foam::UPstream::allocatePstreamCommunicator
(
    const label parentIndex,
    const label index
)
{
    if (index == PstreamGlobals::MPIGroups_.size())
    {
        // Extend storage with dummy values
        MPI_Group newGroup = MPI_GROUP_NULL;
        PstreamGlobals::MPIGroups_.append(newGroup);
        MPI_Comm newComm = MPI_COMM_NULL;
        PstreamGlobals::MPICommunicators_.append(newComm);
    }
    else if (index > PstreamGlobals::MPIGroups_.size())
    {
        FatalErrorInFunction
            << "PstreamGlobals out of sync with UPstream data. Problem."
            << Foam::exit(FatalError);
    }

    if (parentIndex == -1)
    {
        // Allocate the world communicator
        if (index != UPstream::worldComm)
        {
            FatalErrorInFunction
                << "world communicator should always be index "
                << UPstream::worldComm << Foam::exit(FatalError);
        }

        // ... (adopt MPI_COMM_WORLD and its group, query my rank)

        int numProcs;
        MPI_Comm_size(PstreamGlobals::MPICommunicators_[index], &numProcs);

        procIDs_[index].setSize(numProcs);
        forAll(procIDs_[index], i)
        {
            procIDs_[index][i] = i;
        }
    }
    else
    {
        // Create a new group from the selected ranks of the parent group
        MPI_Group_incl
        (
            PstreamGlobals::MPIGroups_[parentIndex],
            procIDs_[index].size(),
            procIDs_[index].begin(),
           &PstreamGlobals::MPIGroups_[index]
        );

        // ... (MPI_Comm_create of the new communicator from the parent)

        if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
        {
            // This process is not a member of the new communicator
            myProcNo_[index] = -1;
        }
        else if
        (
            MPI_Comm_rank
            (
                PstreamGlobals::MPICommunicators_[index],
               &myProcNo_[index]
            )
        )
        {
            FatalErrorInFunction
                << "Problem :"
                << " when allocating communicator at " << index
                << " from ranks " << procIDs_[index]
                << " of parent " << parentIndex
                << " cannot find my own rank"
                << Foam::exit(FatalError);
        }
    }
}


void Foam::UPstream::freePstreamCommunicator(const label communicator)
{
    // ... (free the MPI communicator and group allocated above)
}
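// --- Illustrative sketch (not part of UPstream.C) --------------------------
// allocatePstreamCommunicator() builds a sub-communicator by selecting ranks
// of the parent with MPI_Group_incl and then calling MPI_Comm_create. The
// same two-step pattern in isolation; exampleCreateSubComm is a hypothetical
// name and the caller owns the returned communicator.
#include <mpi.h>
#include <vector>

MPI_Comm exampleCreateSubComm(MPI_Comm parent, const std::vector<int>& ranks)
{
    MPI_Group parentGroup = MPI_GROUP_NULL;
    MPI_Comm_group(parent, &parentGroup);

    // Select the chosen parent ranks into a new group.
    MPI_Group newGroup = MPI_GROUP_NULL;
    MPI_Group_incl
    (
        parentGroup,
        static_cast<int>(ranks.size()),
        const_cast<int*>(ranks.data()),
        &newGroup
    );

    // Collective over the parent: ranks outside 'ranks' get MPI_COMM_NULL.
    MPI_Comm newComm = MPI_COMM_NULL;
    MPI_Comm_create(parent, newGroup, &newComm);

    MPI_Group_free(&newGroup);
    MPI_Group_free(&parentGroup);
    return newComm;
}
// ----------------------------------------------------------------------------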
void Foam::UPstream::waitRequests(const label start)
{
    if (debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << PstreamGlobals::outstandingRequests_.size() - start
            << " outstanding requests starting at " << start << endl;
    }

    if (PstreamGlobals::outstandingRequests_.size())
    {
        SubList<MPI_Request> waitRequests
        (
            PstreamGlobals::outstandingRequests_,
            PstreamGlobals::outstandingRequests_.size() - start,
            start
        );

        if
        (
            MPI_Waitall
            (
                waitRequests.size(),
                waitRequests.begin(),
                MPI_STATUSES_IGNORE
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Waitall returned with error" << Foam::endl;
        }

        resetRequests(start);
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}
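// --- Illustrative sketch (not part of UPstream.C) --------------------------
// waitRequests() completes every outstanding request from 'start' onwards
// with one MPI_Waitall and then truncates the request list. The same idea
// against a plain std::vector of requests; exampleWaitAllFrom is a
// hypothetical name.
#include <mpi.h>
#include <cstddef>
#include <vector>

void exampleWaitAllFrom(std::vector<MPI_Request>& requests, std::size_t start)
{
    if (start >= requests.size())
    {
        return;
    }

    // Block until every request in the tail [start, end) has completed.
    MPI_Waitall
    (
        static_cast<int>(requests.size() - start),
        requests.data() + start,
        MPI_STATUSES_IGNORE
    );

    // Completed requests are no longer needed; drop them.
    requests.resize(start);
}
// ----------------------------------------------------------------------------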
void Foam::UPstream::waitRequest(const label i)
{
    if (debug)
    {
        Pout<< "UPstream::waitRequest : starting wait for request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorInFunction
            << "There are " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding send requests and you are asking for i=" << i
            << endl
            << "Maybe you are mixing blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    if
    (
        MPI_Wait
        (
           &PstreamGlobals::outstandingRequests_[i],
            MPI_STATUS_IGNORE
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Wait returned with error" << Foam::endl;
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequest : finished wait for request:" << i
            << endl;
    }
}
bool Foam::UPstream::finishedRequest(const label i)
{
    if (debug)
    {
        Pout<< "UPstream::finishedRequest : checking request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorInFunction
            << "There are " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding send requests and you are asking for i=" << i
            << endl
            << "Maybe you are mixing blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    int flag;
    MPI_Test
    (
       &PstreamGlobals::outstandingRequests_[i],
       &flag,
        MPI_STATUS_IGNORE
    );

    if (debug)
    {
        Pout<< "UPstream::finishedRequest : finished request:" << i
            << endl;
    }

    return flag != 0;
}
int Foam::UPstream::allocateTag(const char* s)
{
    int tag;
    if (PstreamGlobals::freedTags_.size())
    {
        tag = PstreamGlobals::freedTags_.remove();
    }
    else
    {
        tag = PstreamGlobals::nTags_++;
    }

    if (debug)
    {
        Pout<< "UPstream::allocateTag " << s
            << " : tag:" << tag
            << endl;
    }

    return tag;
}


// (The allocateTag(const word&) overload differs only in the argument type
//  and prints the same "UPstream::allocateTag" debug message.)


void Foam::UPstream::freeTag(const char* s, const int tag)
{
    if (debug)
    {
        Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
    }
    PstreamGlobals::freedTags_.append(tag);
}


// (The freeTag(const word&, const int tag) overload behaves the same way.)
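// --- Illustrative sketch (not part of UPstream.C) --------------------------
// allocateTag()/freeTag() hand out message tags from a free list so that
// concurrent communication phases do not reuse each other's tags. A minimal
// standalone version of that bookkeeping; ExampleTagPool, firstTag and its
// members are hypothetical names.
#include <vector>

class ExampleTagPool
{
    int nextTag_;
    std::vector<int> freedTags_;

public:
    explicit ExampleTagPool(int firstTag = 1)
    :
        nextTag_(firstTag)
    {}

    // Reuse a previously freed tag if possible, otherwise mint a new one.
    int allocate()
    {
        if (!freedTags_.empty())
        {
            int tag = freedTags_.back();
            freedTags_.pop_back();
            return tag;
        }
        return nextTag_++;
    }

    // Return a tag to the pool once the communication using it has finished.
    void free(int tag)
    {
        freedTags_.push_back(tag);
    }
};
// ----------------------------------------------------------------------------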