/* This file is part of the OpenLB library
*
* Copyright (C) 2007 The OpenLB project
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
/** \file
* Wrapper functions that simplify the use of MPI, template instantiations
*/
#ifdef PARALLEL_MODE_MPI
#include "communication/mpiManager.h"
#include "core/blockData2D.h"
#include "core/olbDebug.h"
#include <iostream>
#include <algorithm>
#ifndef OLB_PRECOMPILED
#include "core/blockData2D.hh"
#endif
namespace olb {
namespace singleton {
/// Construct in an uninitialized state; init() must be called before
/// any MPI operation is attempted (ok stays false until then).
MpiManager::MpiManager()
  : ok(false)
  , clout(std::cout, "MpiManager")
{ }
/// Shut down MPI, but only if this manager successfully initialized it.
MpiManager::~MpiManager()
{
  if (!ok) {
    return;
  }
  MPI_Finalize();
  ok = false;
}
void MpiManager::init(int *argc, char ***argv)
{
int ok1 = MPI_Init(argc, argv);
int ok2 = MPI_Comm_rank(MPI_COMM_WORLD,&taskId);
int ok3 = MPI_Comm_size(MPI_COMM_WORLD,&numTasks);
ok = (ok1==0 && ok2==0 && ok3==0);
clout << "Sucessfully initialized, numThreads=" << getSize() << std::endl;
}
int MpiManager::getSize() const
{
return numTasks;
}
int MpiManager::getRank() const
{
return taskId;
}
/// \return rank of the main ("boss") process; rank 0 by convention
int MpiManager::bossId() const
{
  return 0;
}
/// \return true only on the boss process (rank 0)
bool MpiManager::isMainProcessor() const
{
  return getRank() == bossId();
}
/// \return wall-clock time from MPI_Wtime(), or 0 if MPI is not initialized
double MpiManager::getTime() const
{
  return ok ? MPI_Wtime() : 0.;
}
/// Synchronize all processes of the given communicator.
/// No-op if MPI is not initialized.
void MpiManager::barrier(MPI_Comm comm)
{
  if (ok) {
    MPI_Barrier(comm);
  }
}
/// Blocking send of a bool buffer; no-op if MPI is not initialized.
/// bool has no dedicated MPI datatype, so raw bytes are transferred.
/// NOTE(review): this assumes sizeof(bool) == 1 and a matching
/// MPI_BYTE receive on the peer — verify on non-mainstream platforms.
template <>
void MpiManager::send<bool>(bool *buf, int count, int dest, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Send(static_cast<void*>(buf), count, MPI_BYTE, dest, tag, comm);
  }
}
/// Blocking send of a char buffer; no-op if MPI is not initialized.
template <>
void MpiManager::send<char>(char *buf, int count, int dest, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Send(static_cast<void*>(buf), count, MPI_CHAR, dest, tag, comm);
  }
}
/// Blocking send of an int buffer; no-op if MPI is not initialized.
template <>
void MpiManager::send<int>(int *buf, int count, int dest, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Send(static_cast<void*>(buf), count, MPI_INT, dest, tag, comm);
  }
}
/// Blocking send of a float buffer; no-op if MPI is not initialized.
template <>
void MpiManager::send<float>(float *buf, int count, int dest, int tag, MPI_Comm comm)
{
  if (ok) {
    MPI_Send(static_cast<void*>(buf), count, MPI_FLOAT, dest, tag, comm);
  }
}
template <>