IPCC  1.0
CKNMPIManager Class Reference

MPI Management class. More...

#include "KNMPIManager.h"

Collaboration diagram for CKNMPIManager:

Classes

struct  COMPLEX_NUMBER
 Complex number. More...
 

Public Member Functions

 CKNMPIManager ()
 Constructor. More...
 
 ~CKNMPIManager ()
 Destructor. More...
 

Static Public Member Functions

static bool InitLevel (int nMPILevel, int nFindingDegeneratedEVCount)
 Initialize the MPI levels; the lowest level is used for multi-node Lanczos calculation. More...
 
static bool CheckDeflationNodeCount (int nNeedNodeCount)
 Check whether the total node count fits the deflation group split. More...
 
static void SetMPIEnviroment (int nRank, int nTotalNode)
 Set the MPI environment. More...
 
static void LoadBlancingForLanczos (int nRowCount)
 Load balancing for MPI; this function is only for Lanczos solving without geometric construction. More...
 
static void LoadBlancing (int nElementCount)
 Load balancing for MPI; this function is for Lanczos solving with geometric construction. More...
 
static int GetCurrentLoadBalanceCount ()
 Get the current rank's load-balancing count. More...
 
static int GetLoadBalanceCount (int nRank)
 
static int GetCurrentRank ()
 
static int GetCurrentRank (MPI_Comm comm)
 Get Current node's rank number. More...
 
static int GetTotalNodeCount ()
 Get the total node count. More...
 
static bool IsRootRank ()
 Check whether this node is the root rank. More...
 
static bool IsRootRank (MPI_Comm comm)
 Check this node is root rank in 'comm' MPI_Comm. More...
 
static bool IsInMPIRoutine ()
 Check whether this process is running in an MPI environment. More...
 
static void BroadcastVector (CKNMatrixOperation::CKNVector *pVector)
 Broadcast a vector to the sub ranks. More...
 
static void BroadcastBool (bool *boolValue, int nRootRank=0)
 Broadcast a boolean value. More...
 
static void BroadcastDouble (double *pValue, unsigned int nSize, int nRootRank=0, MPI_Comm comm=MPI_COMM_NULL)
 Broadcast a double buffer. More...
 
static void BroadcastInt (int *pValue, unsigned int nSize, int nRootRank=0, MPI_Comm comm=MPI_COMM_NULL)
 Broadcast an integer buffer. More...
 
static void BroadcastLanczosResult (CKNLanczosMethod::LPEIGENVALUE_RESULT lpResult, int nIterationCount)
 Broadcast Lanczos result. More...
 
static void SplitVector (CKNMatrixOperation::CKNVector *pVector, int nRootRank)
 Split vector to sub rank. More...
 
static void MergeVector (CKNMatrixOperation::CKNVector *pVector, CKNMatrixOperation::CKNVector *pResultVector, unsigned int nMergeSize)
 Merge vector to sub rank. More...
 
static void MergeVectorOptimal (CKNMatrixOperation::CKNVector *pSrcVector, CKNMatrixOperation::CKNVector *pResultVector, unsigned int nMergeSize, double fFirstIndex)
 Merge vector to sub rank, operated without vector class member function call. More...
 
static void MergeVectorEx_Optimal (CKNMatrixOperation::CKNVector *pVector, CKNMatrixOperation::CKNVector *pResultVector, unsigned int nMergeSize, double fFirstIndex, unsigned int nSizeFromPrevRank, unsigned int nSizeFromNextRank, unsigned int nSizetoPrevRank, unsigned int nSizetoNextRank, unsigned int *)
 Merge vector for 1 layer exchanging. More...
 
static LPCOMPLEX_NUMBER ConvertVectorToMPIComplexBuffer (CKNMatrixOperation::CKNVector *pVector)
 Convert vector class to MPI_COMPLEX array. More...
 
static void AllReduceComlex (CKNComplex *pNumber, CKNTimeMeasurement::MEASUREMENT_INDEX INDEX=CKNTimeMeasurement::COMM)
 Perform an all-reduce on a CKNComplex value. More...
 
static double AllReduceDouble (double fNumber)
 Perform an all-reduce on a double value. More...
 
static int GetRootRank ()
 Get the root rank. More...
 
static void FinalizeManager ()
 Finalize the manager and release all memory. More...
 
static void InitCommunicationBufferMetric ()
 Initialize the MPI communication buffers for MVMul. More...
 
static void SendDoubleBufferSync (int nTargetRank, double *pBuffer, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Send a double data buffer with synchronization. More...
 
static void WaitSendDoubleBufferSync (MPI_Request *req)
 Wait for a synchronous double-buffer send to complete. More...
 
static void ReceiveDoubleBufferSync (int nSourceRank, double *pBuffer, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Receive a double data buffer with synchronization. More...
 
static void WaitReceiveDoubleBufferAsync (MPI_Request *req)
 Wait for a double-buffer receive to complete. More...
 
static MPI_Comm GetMPIComm ()
 Get the MPI_Comm. More...
 
static bool IsMultiLevelMPI ()
 Check whether multi-level MPI is enabled. More...
 
static void BarrierAllComm ()
 Barrier across MPI_COMM_WORLD. More...
 
static void Barrier ()
 Barrier over the current Lanczos communicator. More...
 
static bool IsLanczosComputeRoot ()
 Check whether this rank is the root of the Lanczos computation group. More...
 
static bool IsDeflationRoot ()
 Check whether this rank is the root of the deflation group. More...
 
static int * GetEigenvalueCountFromDeflationGroup (int nDeflationGroupCount, int nLocalEVCount)
 Collect the eigenvalue counts from all deflation groups. More...
 
static void GatherVDouble (int nSourceCount, double *pReceiveBuffer, int *pSourceCount, double *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
 GatherV wrapper for double data. More...
 
static void GatherVInt (int nSourceCount, int *pReceiveBuffer, int *pSourceCount, int *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
 GatherV wrapper for int data. More...
 
static void GatherEVFromDeflationGroup (int nSourceCount, double *pReceiveBuffer, int *pSourceCount, double *pSendBuffer, int nSendCount)
 Gather eigenvalues from the deflation group. More...
 
static void GatherEVIterationFromDeflationGroup (int nSourceCount, int *pReceiveBuffer, int *pSourceCount, int *pSendBuffer, int nSendCount)
 Gather the eigenvalue-finding iteration counts from the deflation group. More...
 
static void ExchangeCommand (double *pfCommand, MPI_Comm comm)
 Exchange a command between MPI_Comms. More...
 
static MPI_Comm GetLanczosComputComm ()
 Get the Lanczos computing group MPI_Comm. More...
 
static MPI_Comm GetDeflationComm ()
 Get the deflation computing group MPI_Comm. More...
 
static void SendVectorSync (int nTargetRank, CKNMatrixOperation::CKNVector *pVector, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Send a vector with synchronization. More...
 
static void ReceiveVectorSync (int nSourceRank, CKNMatrixOperation::CKNVector *pVector, int nSize, MPI_Request *req, MPI_Comm commWorld=MPI_COMM_NULL)
 Receiving Vector with sync. More...
 
static unsigned int GetLanczosGroupIndex ()
 

Private Types

typedef struct
CKNMPIManager::COMPLEX_NUMBER
LPCOMPLEX_NUMBER
 

Static Private Attributes

static int m_nCurrentRank = 0
 Current MPI rank. More...
 
static int m_nCommWorldRank = 0
 MPI rank before the split. More...
 
static int m_nTotalNode = 1
 Total node count. More...
 
static bool m_bStartMPI = false
 Whether MPI_Init has been called. More...
 
static int * m_pLoadBalance = NULL
 Load balancing for MPI communication. More...
 
static LPCOMPLEX_NUMBER m_pCommBuffer = NULL
 Data buffer for MPI communication. More...
 
static LPCOMPLEX_NUMBER m_pConvertingBuffer = NULL
 Data buffer for vector conversion. More...
 
static int * m_pRecvCount = NULL
 Receive count array for MPI communication. More...
 
static int * m_pSendCount = NULL
 Send count array for MPI communication. More...
 
static int * m_pBankInfo = NULL
 Bank information after the MPI split. More...
 
static int * m_pDispls = NULL
 Displacements for MPI communication. More...
 
static MPI_Request m_SendDoubleAsyncRequest = MPI_REQUEST_NULL
 Request for sending doubles. More...
 
static MPI_Request m_ReceiveDoubleAsyncRequest = MPI_REQUEST_NULL
 Request for receiving doubles. More...
 
static unsigned int m_nMPILevel = 1
 MPI level. More...
 
static bool m_bNeedPostOperation [10] = { false, false, false, false, false, false, false, false, false, false }
 Flags for post-operations required at each MPI level. More...
 
static MPI_Comm m_mpiCommIndex = MPI_COMM_WORLD
 Lanczos Method MPI_Comm. More...
 
static MPI_Comm m_deflationComm = MPI_COMM_NULL
 Deflation computing MPI_Comm. More...
 
static MPI_Group m_lanczosGroup = MPI_GROUP_EMPTY
 MPI Group for Lanczos computation. More...
 
static MPI_Group m_deflationGroup = MPI_GROUP_EMPTY
 MPI Group for Deflation computation. More...
 
static unsigned int m_nLanczosGroupIndex = 0
 MPI Group index for Lanczos group. More...
 
static bool m_bMultiLevel = false
 Flag for Multilevel MPI group. More...
 

Detailed Description

MPI Management class.

Date
2014/8/25
Author
Kyu Nam Cho (mystous@gmail.com)

Definition at line 21 of file KNMPIManager.h.

Member Typedef Documentation

Constructor & Destructor Documentation

CKNMPIManager::CKNMPIManager ( )

Constructor.

Definition at line 38 of file KNMPIManager.cpp.

39 {
40 }
CKNMPIManager::~CKNMPIManager ( )

Destructor.

Definition at line 42 of file KNMPIManager.cpp.

43 {
44 }

Member Function Documentation

void CKNMPIManager::AllReduceComlex ( CKNComplex *  pNumber,
CKNTimeMeasurement::MEASUREMENT_INDEX  INDEX = CKNTimeMeasurement::COMM 
)
static

Perform an all-reduce on a CKNComplex value.

Parameters
pNumber  Variable to sum
INDEX  Time measurement index

Definition at line 625 of file KNMPIManager.cpp.

References CKNComplex::GetImaginaryNumber(), CKNComplex::GetRealNumber(), m_mpiCommIndex, CKNTimeMeasurement::MeasurementEnd(), CKNTimeMeasurement::MeasurementStart(), and CKNComplex::SetComplexNumber().

Referenced by CKNMatrixOperation::VVDot().

626 {
627 #ifdef DISABLE_MPI_ROUTINE
628  return;
629 #endif
630 
631  double fSend[2], fRecv[2];
632 
633  fSend[0] = pNumber->GetRealNumber();
634  fSend[1] = pNumber->GetImaginaryNumber();
635 
637  MPI_Allreduce(fSend, fRecv, 2, MPI_DOUBLE, MPI_SUM, m_mpiCommIndex);
639 
640  pNumber->SetComplexNumber(fRecv[0], fRecv[1]);
641 }
double GetImaginaryNumber() const
Get imaginary part.
Definition: KNComplex.h:27
double GetRealNumber() const
Get real part.
Definition: KNComplex.h:26
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
void SetComplexNumber(double fReal, double fImaginaray)
Set Complex number using real part and imaginary part.
Definition: KNComplex.cpp:59

Here is the call graph for this function:

Here is the caller graph for this function:
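
A minimal usage sketch (not from the source; the partial-sum variable is illustrative, and an initialized MPI environment is assumed):

#include "KNMPIManager.h"
#include "KNComplex.h"

// Each rank holds a partial complex dot product; after the call every rank
// holds the sum reduced over the Lanczos communicator.
void SumPartialDot(CKNComplex *pLocalDot)
{
    CKNMPIManager::AllReduceComlex(pLocalDot);
}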

double CKNMPIManager::AllReduceDouble ( double  fNumber)
static

Perform an all-reduce on a double value.

Parameters
fNumber  Variable to sum
Returns
The reduced sum

Definition at line 647 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, m_mpiCommIndex, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNGeometricShape::CalculateUnitcellCount(), CKNMatrixOperation::CKNVector::GetNorm(), and CKNTBMS_Solver::Launching_TBMS_Solver().

648 {
649 #ifdef DISABLE_MPI_ROUTINE
650  return fNumber;
651 #endif
652 
653  double fRecv;
654 
656  MPI_Allreduce(&fNumber, &fRecv, 1, MPI_DOUBLE, MPI_SUM, m_mpiCommIndex);
658 
659  return fRecv;
660 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:
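
A usage sketch in the spirit of the CKNVector::GetNorm() caller above (hypothetical names; the project headers and an initialized MPI environment are assumed):

#include <cmath>
#include "KNMPIManager.h"

// Sum the per-rank squared magnitudes, then take the square root on every rank.
double GlobalNorm(double fLocalSquaredSum)
{
    double fTotal = CKNMPIManager::AllReduceDouble(fLocalSquaredSum);
    return std::sqrt(fTotal);
}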

static void CKNMPIManager::Barrier ( )
inlinestatic

Definition at line 70 of file KNMPIManager.h.

References m_mpiCommIndex.

Referenced by CKNLanczosMethod::SaveLanczosResult().

70 { MPI_Barrier(m_mpiCommIndex); };
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101

Here is the caller graph for this function:

void CKNMPIManager::BarrierAllComm ( )
static

Barrier across MPI_COMM_WORLD.

Caution: this function waits for all ranks, even if the communicator has been split into several colors.

Definition at line 791 of file KNMPIManager.cpp.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

792 {
794  MPI_Barrier(MPI_COMM_WORLD);
795 }

Here is the caller graph for this function:

void CKNMPIManager::BroadcastBool ( bool *  boolValue,
int  nRootRank = 0 
)
static

Broadcast a boolean value.

Parameters
boolValue  Boolean variable to broadcast
nRootRank  Root rank index

Definition at line 524 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, m_mpiCommIndex, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNLanczosMethod::LanczosIterationLoop().

525 {
526 #ifdef DISABLE_MPI_ROUTINE
527  return;
528 #endif
529 
531  MPI_Bcast(boolValue, 1, MPI_C_BOOL, nRootRank, m_mpiCommIndex);
533 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:
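
A sketch of the typical pattern suggested by the LanczosIterationLoop() caller, with illustrative names only: the root rank decides a flag and every other rank receives it.

#include "KNMPIManager.h"

bool ShareConvergenceFlag(bool bRootDecision)
{
    bool bConverged = false;
    if (CKNMPIManager::IsRootRank())
        bConverged = bRootDecision;             // only the root's value matters
    CKNMPIManager::BroadcastBool(&bConverged);  // nRootRank defaults to 0
    return bConverged;                          // identical on every rank afterwards
}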

void CKNMPIManager::BroadcastDouble ( double *  pValue,
unsigned int  nSize,
int  nRootRank = 0,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

Broadcast a double buffer.

Parameters
pValue  Double data buffer to broadcast
nSize  Data buffer size
nRootRank  Root rank index

Definition at line 540 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, m_mpiCommIndex, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNLanczosMethod::DoResidualCheck(), CKNGeometricShape::ExchangeAtomInfoBetweenNode(), ExchangeCommand(), and CKNLanczosMethod::MergeDegeneratedEigenvalues().

541 {
542 #ifdef DISABLE_MPI_ROUTINE
543  return;
544 #endif
545 
547  if( MPI_COMM_NULL == comm )
548  MPI_Bcast(pValue, nSize, MPI_DOUBLE, nRootRank, m_mpiCommIndex);
549  else
550  MPI_Bcast(pValue, nSize, MPI_DOUBLE, nRootRank, comm);
552 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:
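
A sketch broadcasting a small double buffer inside an explicit communicator (names are illustrative; passing the default MPI_COMM_NULL falls back to the Lanczos communicator, as in the listing above):

#include <mpi.h>
#include "KNMPIManager.h"

// Rank 0 of 'comm' owns pParams; afterwards every rank in 'comm' holds a copy.
void ShareParameters(double *pParams, unsigned int nSize, MPI_Comm comm)
{
    CKNMPIManager::BroadcastDouble(pParams, nSize, 0, comm);
}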

void CKNMPIManager::BroadcastInt ( int *  pValue,
unsigned int  nSize,
int  nRootRank = 0,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

Broadcast an integer buffer.

Parameters
[in,out] pValue  Variable to broadcast
nSize  Number of elements to broadcast
nRootRank  Root rank number
comm  MPI_Comm over which to broadcast

Definition at line 560 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, m_mpiCommIndex, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

561 {
562 #ifdef DISABLE_MPI_ROUTINE
563  return;
564 #endif
565 
567  if( MPI_COMM_NULL == comm )
568  MPI_Bcast(pValue, nSize, MPI_INT, nRootRank, m_mpiCommIndex);
569  else
570  MPI_Bcast(pValue, nSize, MPI_INT, nRootRank, comm);
572 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::BroadcastLanczosResult ( CKNLanczosMethod::LPEIGENVALUE_RESULT  lpResult,
int  nIterationCount 
)
static

Broadcast Lanczos result.

Parameters
lpResult  Lanczos method result to broadcast
nIterationCount  Current iteration count

Definition at line 578 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, GetRootRank(), IsRootRank(), m_mpiCommIndex, CKNTimeMeasurement::MALLOC, CKNTimeMeasurement::MeasurementEnd(), CKNTimeMeasurement::MeasurementStart(), CKNLanczosMethod::EIGENVALUE_RESULT::nEigenValueCount, CKNLanczosMethod::EIGENVALUE_RESULT::nEigenValueCountForMemeory, CKNLanczosMethod::EIGENVALUE_RESULT::nEigenVectorSize, CKNLanczosMethod::EIGENVALUE_RESULT::nMaxEigenValueFoundIteration, CKNLanczosMethod::EIGENVALUE_RESULT::pEigenValueFoundIteration, and CKNLanczosMethod::EIGENVALUE_RESULT::pEigenVectors.

Referenced by CKNLanczosMethod::LanczosIteration().

579 {
580  unsigned int nCastData[4];
581  unsigned int i;
582 
583 #ifdef DISABLE_MPI_ROUTINE
584  return;
585 #endif
586 
587  if( IsRootRank() )
588  {
589  nCastData[0] = lpResult->nEigenValueCount;
590  nCastData[1] = lpResult->nEigenValueCountForMemeory;
591  nCastData[2] = lpResult->nMaxEigenValueFoundIteration;
592  nCastData[3] = lpResult->nEigenVectorSize;
593  }
594 
596  MPI_Bcast(nCastData, 4, MPI_INT, GetRootRank(), m_mpiCommIndex);
598 
599  if( !IsRootRank() )
600  {
601  lpResult->nEigenValueCount = nCastData[0];
602  lpResult->nEigenValueCountForMemeory = nCastData[1];
603  lpResult->nMaxEigenValueFoundIteration = nCastData[2];
604  lpResult->nEigenVectorSize = nCastData[3];
605 
607  lpResult->pEigenValueFoundIteration = (unsigned int*)malloc(sizeof(unsigned int)*lpResult->nEigenValueCount);
608  lpResult->pEigenVectors = (double**)malloc(sizeof(double*)*lpResult->nEigenValueCount);
609  for (i = 0; i < lpResult->nEigenValueCount; ++i)
610  lpResult->pEigenVectors[i] = (double*)malloc(sizeof(double)*lpResult->nEigenVectorSize);
612  }
613 
615  MPI_Bcast(lpResult->pEigenValueFoundIteration, lpResult->nEigenValueCount, MPI_INT, GetRootRank(), m_mpiCommIndex);
616  for (i = 0; i < lpResult->nEigenValueCount; ++i)
617  MPI_Bcast(lpResult->pEigenVectors[i], lpResult->nEigenVectorSize, MPI_DOUBLE, GetRootRank(), m_mpiCommIndex);
619 }
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static int GetRootRank()
Definition: KNMPIManager.h:60
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static bool IsRootRank()
Get Total node count.

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::BroadcastVector ( CKNMatrixOperation::CKNVector pVector)
static

Broadcast a vector to the sub ranks.

Parameters
pVector  Vector to broadcast

Remarks
This function is currently not used (2/Feb/2015).

Definition at line 263 of file KNMPIManager.cpp.

References IsInMPIRoutine().

264 {
265 #ifdef DISABLE_MPI_ROUTINE
266  if (!IsInMPIRoutine())
267  return;
268 #endif
269 }
static bool IsInMPIRoutine()
Definition: KNMPIManager.h:47

Here is the call graph for this function:

bool CKNMPIManager::CheckDeflationNodeCount ( int  nNeedNodeCount)
static

Check whether the total node count fits the deflation group split.

Parameters
nNeedNodeCount  Deflation group count
Returns
Whether the deflation group can be created

Definition at line 127 of file KNMPIManager.cpp.

References m_nTotalNode.

Referenced by InitLevel().

128 {
129  if( 0 == m_nTotalNode % nNeedNodeCount )
130  return true;
131  else
132  return false;
133 }
static int m_nTotalNode
Total node count.
Definition: KNMPIManager.h:88

Here is the caller graph for this function:
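
An illustrative guard (not from the source; InitLevel() already performs this check internally, so the explicit call here is purely for demonstration):

#include "KNMPIManager.h"

bool TrySplitIntoDeflationGroups(int nDeflationGroupCount)
{
    // The split is only possible when the total node count divides evenly
    // into the requested number of deflation groups.
    if (!CKNMPIManager::CheckDeflationNodeCount(nDeflationGroupCount))
        return false;
    return CKNMPIManager::InitLevel(2, nDeflationGroupCount);
}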

CKNMPIManager::LPCOMPLEX_NUMBER CKNMPIManager::ConvertVectorToMPIComplexBuffer ( CKNMatrixOperation::CKNVector pVector)
static

Convert vector class to MPI_COMPLEX array.

Parameters
pVector  Conversion target vector
Returns
Conversion result: an array of complex-number structures (AoS layout)
Remarks
For performance, the AoS layout should be changed to an SoA layout.

Definition at line 667 of file KNMPIManager.cpp.

References CKNMPIManager::COMPLEX_NUMBER::fImginary, CKNMPIManager::COMPLEX_NUMBER::fReal, CKNMatrixOperation::CKNVector::GetAt(), CKNComplex::GetImaginaryNumber(), CKNComplex::GetRealNumber(), CKNMatrixOperation::CKNVector::GetSize(), and m_pConvertingBuffer.

Referenced by SplitVector().

668 {
669  LPCOMPLEX_NUMBER lpResult = NULL;
670  unsigned int i, nCount;
671 
672  nCount = pVector->GetSize();
673  lpResult = m_pConvertingBuffer;
674 
675  for (i = 0; i < nCount; i++)
676  {
677  lpResult[i].fReal = pVector->GetAt(i).GetRealNumber();
678  lpResult[i].fImginary = pVector->GetAt(i).GetImaginaryNumber();
679  }
680 
681  return lpResult;
682 }
double GetImaginaryNumber() const
Get imaginary part.
Definition: KNComplex.h:27
double GetRealNumber() const
Get real part.
Definition: KNComplex.h:26
CKNComplex GetAt(unsigned int nIndex)
Get element value from specific index.
static LPCOMPLEX_NUMBER m_pConvertingBuffer
Data buffer for Vector converting.
Definition: KNMPIManager.h:92
unsigned int GetSize()
Return Vector elements size.
struct CKNMPIManager::COMPLEX_NUMBER * LPCOMPLEX_NUMBER

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::ExchangeCommand ( double *  pfCommand,
MPI_Comm  comm 
)
static

Exchange a command between MPI_Comms.

Parameters
pfCommand  Command buffer
comm  MPI_Comm over which to exchange the command

Definition at line 878 of file KNMPIManager.cpp.

References BroadcastDouble(), and COMMAND_SIZE.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

879 {
880  BroadcastDouble(pfCommand, COMMAND_SIZE, 0, comm);
881 }
static void BroadcastDouble(double *pValue, unsigned int nSize, int nRootRank=0, MPI_Comm comm=MPI_COMM_NULL)
Broadcst boolean value.
#define COMMAND_SIZE
Definition: CKNGlobal.h:102

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::FinalizeManager ( )
static

Finalize the manager and release all memory.

Definition at line 693 of file KNMPIManager.cpp.

References FREE_MEM, CKNTimeMeasurement::FREE_MEM, m_bStartMPI, m_deflationComm, m_deflationGroup, m_lanczosGroup, m_mpiCommIndex, m_nCurrentRank, m_nTotalNode, m_pBankInfo, m_pDispls, m_pLoadBalance, m_pRecvCount, m_pSendCount, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNLanczosTest::COmpareWIthMatLabSeOrthMPI(), CKNTBMS_Solver::FinalEvn(), CKNLanczosTest::LargeSizeMatrixMPI(), and CKNGeometricConstructionLaunch::LaunchingGeometricConstructionMPI().

694 {
695  m_bStartMPI = false;
696  m_nCurrentRank = 0;
697  m_nTotalNode = 1;
705  if( MPI_GROUP_EMPTY != m_lanczosGroup )
706  MPI_Group_free(&m_lanczosGroup);
707  if( MPI_GROUP_EMPTY != m_deflationGroup )
708  MPI_Group_free(&m_deflationGroup);
709  if( MPI_COMM_NULL != m_mpiCommIndex && MPI_COMM_WORLD != m_mpiCommIndex)
710  MPI_Comm_free(&m_mpiCommIndex);
711  if( MPI_COMM_NULL != m_deflationComm )
712  MPI_Comm_free(&m_deflationComm);
713 }
static int * m_pSendCount
Sending count variable for MPI comminication.
Definition: KNMPIManager.h:94
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102
static int * m_pRecvCount
Reciving count variable for MPI comminication.
Definition: KNMPIManager.h:93
static int m_nTotalNode
Total node count.
Definition: KNMPIManager.h:88
static int * m_pBankInfo
After MPI Split bank infomation.
Definition: KNMPIManager.h:95
static int * m_pDispls
Displ for MPI comminication.
Definition: KNMPIManager.h:96
static MPI_Group m_deflationGroup
MPI Group for Deflation computation.
Definition: KNMPIManager.h:104
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: CKNGlobal.h:20
static int * m_pLoadBalance
Load blancing for MPI Communication.
Definition: KNMPIManager.h:90
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static MPI_Group m_lanczosGroup
MPI Group for Lanczos computation.
Definition: KNMPIManager.h:103
static int m_nCurrentRank
Getting Lanczos group index.
Definition: KNMPIManager.h:83
static bool m_bStartMPI
MPI_Init call or not.
Definition: KNMPIManager.h:89

Here is the call graph for this function:

Here is the caller graph for this function:
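
A teardown sketch; judging from the listing above, the class only frees its own buffers, groups, and communicators, so finalizing MPI itself remains the caller's responsibility:

#include <mpi.h>
#include "KNMPIManager.h"

void ShutDown()
{
    CKNMPIManager::FinalizeManager();  // release load-balance buffers and split comms/groups
    MPI_Finalize();                    // finalize MPI itself afterwards
}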

static void CKNMPIManager::GatherEVFromDeflationGroup ( int  nSourceCount,
double *  pReceiveBuffer,
int *  pSourceCount,
double *  pSendBuffer,
int  nSendCount 
)
inlinestatic

Definition at line 76 of file KNMPIManager.h.

References GatherVDouble(), and m_deflationComm.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

76 { GatherVDouble(nSourceCount, pReceiveBuffer, pSourceCount, pSendBuffer, nSendCount, m_deflationComm); };
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102
static void GatherVDouble(int nSourceCount, double *pReceiveBuffer, int *pSourceCount, double *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
GatherV for double wrapping function.

Here is the call graph for this function:

Here is the caller graph for this function:

static void CKNMPIManager::GatherEVIterationFromDeflationGroup ( int  nSourceCount,
int *  pReceiveBuffer,
int *  pSourceCount,
int *  pSendBuffer,
int  nSendCount 
)
inlinestatic

Gather the eigenvalue-finding iteration counts from the deflation group.

Definition at line 77 of file KNMPIManager.h.

References GatherVInt(), and m_deflationComm.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

77 { GatherVInt(nSourceCount, pReceiveBuffer, pSourceCount, pSendBuffer, nSendCount, m_deflationComm); };
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102
static void GatherVInt(int nSourceCount, int *pReceiveBuffer, int *pSourceCount, int *pSendBuffer, int nSendCount, MPI_Comm comm=MPI_COMM_NULL)
GahterV for int wrapping function.

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::GatherVDouble ( int  nSourceCount,
double *  pReceiveBuffer,
int *  pSourceCount,
double *  pSendBuffer,
int  nSendCount,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

GatherV wrapper for double data.

Parameters
nSourceCount  Number of gather sources
[out] pReceiveBuffer  Receive buffer
pSourceCount  Source counts (cf. MPI_Gatherv)
pSendBuffer  Send buffer
nSendCount  Send count
comm  MPI_Comm over which to gather the data

Definition at line 822 of file KNMPIManager.cpp.

References FREE_MEM, IsDeflationRoot(), and m_mpiCommIndex.

Referenced by GatherEVFromDeflationGroup().

823 {
824  int *pReceiveCount = NULL;
825  int *pDisp = NULL;
826  unsigned int i;
827 
828  if( IsDeflationRoot() )
829  {
830  pDisp = (int*)malloc(sizeof(int)*nSourceCount);
831  pDisp[0] = 0;
832  for( i = 1; i < nSourceCount ; ++i)
833  pDisp[i] = pDisp[i-1] + pSourceCount[i-1];
834  }
835 
836  if( MPI_COMM_NULL == comm )
837  MPI_Gatherv(pSendBuffer, nSendCount, MPI_DOUBLE, pReceiveBuffer, pSourceCount, pDisp, MPI_DOUBLE, 0, m_mpiCommIndex);
838  else
839  MPI_Gatherv(pSendBuffer, nSendCount, MPI_DOUBLE, pReceiveBuffer, pSourceCount, pDisp, MPI_DOUBLE, 0, comm);
840 
841  FREE_MEM(pDisp);
842 }
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: CKNGlobal.h:20
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static bool IsDeflationRoot()
Checking is root rank of Lanczos computation.
Definition: KNMPIManager.h:72

Here is the call graph for this function:

Here is the caller graph for this function:
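
A sketch gathering variable-length double blocks onto rank 0 of a communicator (all names are illustrative; as with MPI_Gatherv, pReceiveBuffer and pCountPerRank only need to be valid on the root, and the wrapper builds the displacement array there):

#include <mpi.h>
#include "KNMPIManager.h"

void GatherBlocks(double *pLocalBlock, int nLocalCount,
                  int nRankCount, int *pCountPerRank,
                  double *pRootBuffer, MPI_Comm comm)
{
    // pCountPerRank[i] holds rank i's element count on the root.
    CKNMPIManager::GatherVDouble(nRankCount, pRootBuffer, pCountPerRank,
                                 pLocalBlock, nLocalCount, comm);
}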

void CKNMPIManager::GatherVInt ( int  nSourceCount,
int *  pReceiveBuffer,
int *  pSourceCount,
int *  pSendBuffer,
int  nSendCount,
MPI_Comm  comm = MPI_COMM_NULL 
)
static

GatherV wrapper for int data.

Parameters
nSourceCount  Number of gather sources
[out] pReceiveBuffer  Receive buffer
pSourceCount  Source counts (cf. MPI_Gatherv)
pSendBuffer  Send buffer
nSendCount  Send count
comm  MPI_Comm over which to gather the data

Definition at line 852 of file KNMPIManager.cpp.

References FREE_MEM, IsDeflationRoot(), and m_mpiCommIndex.

Referenced by GatherEVIterationFromDeflationGroup().

853 {
854  int *pReceiveCount = NULL;
855  int *pDisp = NULL;
856  unsigned int i;
857 
858  if( IsDeflationRoot() )
859  {
860  pDisp = (int*)malloc(sizeof(int)*nSourceCount);
861  pDisp[0] = 0;
862  for( i = 1; i < nSourceCount ; ++i)
863  pDisp[i] = pDisp[i-1] + pSourceCount[i-1];
864  }
865 
866  if( MPI_COMM_NULL == comm )
867  MPI_Gatherv(pSendBuffer, nSendCount, MPI_INT, pReceiveBuffer, pSourceCount, pDisp, MPI_INT, 0, m_mpiCommIndex);
868  else
869  MPI_Gatherv(pSendBuffer, nSendCount, MPI_INT, pReceiveBuffer, pSourceCount, pDisp, MPI_INT, 0, comm);
870 
871  FREE_MEM(pDisp);
872 }
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: CKNGlobal.h:20
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static bool IsDeflationRoot()
Checking is root rank of Lanczos computation.
Definition: KNMPIManager.h:72

Here is the call graph for this function:

Here is the caller graph for this function:

int CKNMPIManager::GetCurrentLoadBalanceCount ( )
static
int CKNMPIManager::GetCurrentRank ( MPI_Comm  comm)
static

Get the current node's rank number in the given MPI_Comm.

Parameters
comm  MPI_Comm
Returns
The current rank in the 'comm' MPI_Comm

Definition at line 251 of file KNMPIManager.cpp.

252 {
253  int rank;
254 
255  MPI_Comm_rank(comm, &rank);
256  return rank;
257 }
static MPI_Comm CKNMPIManager::GetDeflationComm ( )
inlinestatic

Get the deflation computing group MPI_Comm.

Definition at line 80 of file KNMPIManager.h.

References m_deflationComm.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

80 { return m_deflationComm; };
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102

Here is the caller graph for this function:

int * CKNMPIManager::GetEigenvalueCountFromDeflationGroup ( int  nDeflationGroupCount,
int  nLocalEVCount 
)
static

Collect the total eigenvalue counts from all deflation groups.

Parameters
nDeflationGroupCount  Number of deflation groups
nLocalEVCount  Local deflation group's eigenvalue count
Returns
The merged eigenvalue counts

Definition at line 802 of file KNMPIManager.cpp.

References IsRootRank(), and m_deflationComm.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

803 {
804  int *pEVCount = NULL;
805 
807  pEVCount = (int*)malloc(sizeof(int)*nDeflationGroupCount);
808 
809  MPI_Gather(&nLocalEVCount, 1, MPI_INT, pEVCount, 1, MPI_INT, 0, m_deflationComm);
810 
811  return pEVCount;
812 }
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102
static bool IsRootRank()
Get Total node count.

Here is the call graph for this function:

Here is the caller graph for this function:

static MPI_Comm CKNMPIManager::GetLanczosComputComm ( )
inlinestatic

Definition at line 79 of file KNMPIManager.h.

References m_mpiCommIndex.

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

79 { return m_mpiCommIndex; };
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101

Here is the caller graph for this function:

static unsigned int CKNMPIManager::GetLanczosGroupIndex ( )
inlinestatic

Definition at line 83 of file KNMPIManager.h.

Referenced by CKNLanczosMethod::LanczosIteration(), and CKNLanczosMethod::MergeDegeneratedEigenvalues().

83 { return m_nLanczosGroupIndex; };
static unsigned int m_nLanczosGroupIndex
MPI Group index for Lanczos group.
Definition: KNMPIManager.h:105

Here is the caller graph for this function:

int CKNMPIManager::GetLoadBalanceCount ( int  nRank)
static
Parameters
nRank  Target rank index

Definition at line 203 of file KNMPIManager.cpp.

References m_nTotalNode, and m_pLoadBalance.

Referenced by CKNMatrixOperation::AllocateLocalCSR(), InitCommunicationBufferMetric(), CKNLanczosTest::LargeSizeMatrixMPI(), MergeVectorEx_Optimal(), MergeVectorOptimal(), SplitVector(), CKNLanczosTest::TestCSRBuildingViaFileLoad(), and CKNLanczosTest::TestCSRBuildingViaFileLoad_().

204 {
205 #ifdef DISABLE_MPI_ROUTINE
206  return 0;
207 #endif
208  if (nRank > m_nTotalNode)
209  return 0;
210 
211  return m_pLoadBalance[nRank];
212 }
static int m_nTotalNode
Total node count.
Definition: KNMPIManager.h:88
static int * m_pLoadBalance
Load blancing for MPI Communication.
Definition: KNMPIManager.h:90

Here is the caller graph for this function:

static MPI_Comm CKNMPIManager::GetMPIComm ( )
inlinestatic

Definition at line 67 of file KNMPIManager.h.

References m_mpiCommIndex.

Referenced by CKNMatrixOperation::AllocateLocalCSR(), CKNLanczosMethod::LanczosIterationLoop(), and CKNMatrixOperation::MVMulEx_AsyncCommWithLocalBlocks().

67 { return m_mpiCommIndex; };
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101

Here is the caller graph for this function:

static int CKNMPIManager::GetRootRank ( )
inlinestatic

Definition at line 60 of file KNMPIManager.h.

Referenced by BroadcastLanczosResult().

60 { return 0;};

Here is the caller graph for this function:

void CKNMPIManager::InitCommunicationBufferMetric ( )
static

Initialize the MPI communication buffers for MVMul.

Parameters
nMatrixSize  Matrix size to be solved

Definition at line 718 of file KNMPIManager.cpp.

References GetLoadBalanceCount(), GetTotalNodeCount(), m_pDispls, m_pRecvCount, m_pSendCount, CKNTimeMeasurement::MALLOC, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNTBMS_Solver::AllocateCSR(), CKNLanczosTest::COmpareWIthMatLabSeOrthMPI(), CKNLanczosTest::LargeSizeMatrixMPI(), CKNGeometricConstructionLaunch::LaunchingGeometricConstructionMPI(), and CKNLanczosLaunching::LaunchingLanczos().

719 {
720  unsigned int i;
721 
723  m_pRecvCount = (int*)malloc(sizeof(int)*GetTotalNodeCount());
724  m_pSendCount = (int*)malloc(sizeof(int)*GetTotalNodeCount());
725  m_pDispls = (int*)malloc(sizeof(int)*GetTotalNodeCount());
726  m_pDispls[0] = 0;
728  for (i = 1; i < (unsigned int)GetTotalNodeCount(); i++)
729  {
731  m_pDispls[i] = m_pDispls[i - 1] + GetLoadBalanceCount(i - 1);
732  }
734 }
static int * m_pSendCount
Sending count variable for MPI comminication.
Definition: KNMPIManager.h:94
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static int GetTotalNodeCount()
Definition: KNMPIManager.h:44
static int GetLoadBalanceCount(int nRank)
static int * m_pRecvCount
Reciving count variable for MPI comminication.
Definition: KNMPIManager.h:93
static int * m_pDispls
Displ for MPI comminication.
Definition: KNMPIManager.h:96
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.

Here is the call graph for this function:

Here is the caller graph for this function:

bool CKNMPIManager::InitLevel ( int  nMPILevel,
int  nFindingDegeneratedEVCount 
)
static

Initialize the MPI levels; the lowest level is used for multi-node Lanczos calculation.

Parameters
nMPILevel  MPI level count
nFindingDegeneratedEVCount  Deflation group count
Returns
Whether splitting the MPI group succeeded

First, make the group for the Lanczos method.

Second, make the group for deflation Lanczos (the vertically connected group).

Definition at line 51 of file KNMPIManager.cpp.

References CheckDeflationNodeCount(), FREE_MEM, GetCurrentRank(), GetTotalNodeCount(), IsDeflationRoot(), IsLanczosComputeRoot(), m_bMultiLevel, m_bNeedPostOperation, m_deflationComm, m_deflationGroup, m_lanczosGroup, m_mpiCommIndex, m_nCommWorldRank, m_nLanczosGroupIndex, SetMPIEnviroment(), and CKNIPCCUtility::SetShow().

Referenced by CKNTBMS_Solver::InitMPIEnv(), and CKNLanczosLaunching::LaunchingLanczos().

52 {
53  bool bRtn = true;
54  int nNeedNodeCount = 1;
55  int world_size, rank;
56  int nPerGroupNode;
57  int nLanczosGroupIndex;
58  int *pNewGroupRank = NULL;
59  unsigned int i;
60  MPI_Group commWorldGroup;
61 
62 
63  if( 1 == nMPILevel )
64  {
65  m_mpiCommIndex = MPI_COMM_WORLD;
66  return bRtn;
67  }
68 
69  if( nFindingDegeneratedEVCount > 1 )
70  {
71  m_bNeedPostOperation[1] = true;
72  nNeedNodeCount *= nFindingDegeneratedEVCount;
73  nMPILevel--;
74  }
75 
77  if( nMPILevel == 1 )
78  {
79  bRtn = CheckDeflationNodeCount(nNeedNodeCount);
80  if( !bRtn )
81  return bRtn;
82  }
83 
85  nPerGroupNode = GetTotalNodeCount() / nNeedNodeCount;
86  nLanczosGroupIndex = GetCurrentRank() / nPerGroupNode;
87  pNewGroupRank = (int*)malloc(sizeof(int)*nPerGroupNode);
88 
90  for( i = 0; i < nPerGroupNode ; ++i)
91  pNewGroupRank [i] = nLanczosGroupIndex * nPerGroupNode + i;
92 
93  MPI_Comm_group(MPI_COMM_WORLD,&commWorldGroup);
94  MPI_Group_incl(commWorldGroup,nPerGroupNode,pNewGroupRank ,&m_lanczosGroup);
95  MPI_Comm_create(MPI_COMM_WORLD,m_lanczosGroup,&m_mpiCommIndex);
96  MPI_Comm_size(m_mpiCommIndex, &world_size);
97  MPI_Comm_rank(m_mpiCommIndex, &rank);
98  SetMPIEnviroment(rank, world_size);
99  m_nLanczosGroupIndex = nLanczosGroupIndex;
100 
102  pNewGroupRank = (int*)realloc(pNewGroupRank , sizeof(int)*nFindingDegeneratedEVCount);
103  for( i = 0; i < nFindingDegeneratedEVCount ; ++i)
104  pNewGroupRank [i] = i * nPerGroupNode + GetCurrentRank();
105 
106  MPI_Comm_group(MPI_COMM_WORLD,&commWorldGroup);
107  MPI_Group_incl(commWorldGroup,nFindingDegeneratedEVCount,pNewGroupRank ,&m_deflationGroup);
108  MPI_Comm_create(MPI_COMM_WORLD,m_deflationGroup,&m_deflationComm);
109  MPI_Comm_rank(m_deflationComm, &rank);
110  MPI_Comm_size(m_mpiCommIndex, &world_size);
111 
112  m_bMultiLevel = true;
113 
114  FREE_MEM(pNewGroupRank );
115 
118 
119  bRtn = true;
120  return bRtn;
121 }
static void SetMPIEnviroment(int nRank, int nTotalNode)
Set MPI Enviroment.
static int GetTotalNodeCount()
Definition: KNMPIManager.h:44
static bool CheckDeflationNodeCount(int nNeedNodeCount)
Checking node counts fix to deflation group.
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102
static int m_nCommWorldRank
MPI Rank before split.
Definition: KNMPIManager.h:87
static void SetShow(bool bShow)
Definition: KNIPCCUtility.h:30
static bool IsLanczosComputeRoot()
Barrier current deflation group.
Definition: KNMPIManager.h:71
static bool m_bMultiLevel
Flag for Multilevel MPI group.
Definition: KNMPIManager.h:106
static int GetCurrentRank()
Definition: KNMPIManager.h:42
static MPI_Group m_deflationGroup
MPI Group for Deflation computation.
Definition: KNMPIManager.h:104
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: CKNGlobal.h:20
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static bool m_bNeedPostOperation[10]
MPI Level.
Definition: KNMPIManager.h:100
static MPI_Group m_lanczosGroup
MPI Group for Lanczos computation.
Definition: KNMPIManager.h:103
static unsigned int m_nLanczosGroupIndex
MPI Group index for Lanczos group.
Definition: KNMPIManager.h:105
static bool IsDeflationRoot()
Checking is root rank of Lanczos computation.
Definition: KNMPIManager.h:72

Here is the call graph for this function:

Here is the caller graph for this function:
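
A setup sketch along the lines of the LaunchingLanczos() caller (the arguments are illustrative: two MPI levels with four deflation groups; SetMPIEnviroment() must run first, since InitLevel() relies on the stored rank and node count):

#include <mpi.h>
#include "KNMPIManager.h"

bool SetUpGroups(int argc, char **argv)
{
    int nRank, nTotalNode;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &nRank);
    MPI_Comm_size(MPI_COMM_WORLD, &nTotalNode);
    CKNMPIManager::SetMPIEnviroment(nRank, nTotalNode);

    // Split MPI_COMM_WORLD into four Lanczos groups plus the vertical
    // deflation communicator; fails if the node count is not divisible by 4.
    return CKNMPIManager::InitLevel(2, 4);
}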

static bool CKNMPIManager::IsDeflationRoot ( )
inlinestatic

Check whether this rank is the root of the deflation group.

Definition at line 72 of file KNMPIManager.h.

References IsRootRank(), and m_deflationComm.

Referenced by CKNGeometricShape::ConstructMapInfo(), CKNIPCCUtility::DumpCSR(), GatherVDouble(), GatherVInt(), InitLevel(), CKNTBMS_Solver::Launching_TBMS_Solver(), CKNLanczosMethod::MergeDegeneratedEigenvalues(), CKNLanczosMethod::RecalcuWaveFunction(), CKNLanczosMethod::SaveLanczosResult(), and CKNLanczosMethod::ShowLanczosResult().

72 { return IsRootRank(m_deflationComm); };
static MPI_Comm m_deflationComm
Deflation computing MPI_Comm.
Definition: KNMPIManager.h:102
static bool IsRootRank()
Get Total node count.

Here is the call graph for this function:

Here is the caller graph for this function:

static bool CKNMPIManager::IsInMPIRoutine ( )
inlinestatic

Definition at line 47 of file KNMPIManager.h.

References m_bStartMPI.

Referenced by BroadcastVector(), and CKNLanczosTest::TestCSRBuildingViaFileLoad().

47 { return m_bStartMPI; };
static bool m_bStartMPI
MPI_Init call or not.
Definition: KNMPIManager.h:89

Here is the caller graph for this function:

static bool CKNMPIManager::IsLanczosComputeRoot ( )
inlinestatic

Check whether this rank is the root of the Lanczos computation group.

Definition at line 71 of file KNMPIManager.h.

References IsRootRank(), and m_mpiCommIndex.

Referenced by InitLevel(), and CKNLanczosMethod::MergeDegeneratedEigenvalues().

71 { return IsRootRank(m_mpiCommIndex);};
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static bool IsRootRank()
Get Total node count.

Here is the call graph for this function:

Here is the caller graph for this function:

static bool CKNMPIManager::IsMultiLevelMPI ( )
inlinestatic

Check whether multi-level MPI is enabled.

Definition at line 68 of file KNMPIManager.h.

References m_bMultiLevel.

Referenced by CKNLanczosMethod::LanczosIteration(), CKNTBMS_Solver::Launching_TBMS_Solver(), and CKNLanczosLaunching::LaunchingLanczos().

68 { return m_bMultiLevel; };
static bool m_bMultiLevel
Flag for Multilevel MPI group.
Definition: KNMPIManager.h:106

Here is the caller graph for this function:

bool CKNMPIManager::IsRootRank ( )
static

Check whether this node is the root rank.

Returns
Root rank or not

Definition at line 217 of file KNMPIManager.cpp.

References GetCurrentRank().

Referenced by CKNTBMS_Solver::ApplyPhPotential(), BroadcastLanczosResult(), CKNGeometricShape::CalculateUnitcellCount(), CKNLanczosTest::COmpareWIthMatLabSeOrthMPI(), CKNGeometricShape::ConstructMapInfo(), CKNLanczosMethod::DoEigenValueSolving(), CKNLanczosMethod::DoResidualCheck(), CKNIPCCUtility::DumpCSR(), CKNMatrixDebug::DumpCSR(), CKNIPCCUtility::DumpCSRBinary(), CKNMatrixDebug::DumpCSRBinary(), CKNGeometricShape::ExchangeAtomInfoBetweenNode(), CKNLanczosMethod::FinalizeLanczosInterationVariable(), GetEigenvalueCountFromDeflationGroup(), CKNGeometricShape::GetPeriodicDirection(), CKNLanczosMethod::InitLanczosIterationVariables(), IsDeflationRoot(), CKNGeometricShape::IsInBoundaryCondition(), IsLanczosComputeRoot(), CKNLanczosMethod::LanczosIteration(), CKNLanczosMethod::LanczosIterationLoop(), CKNLanczosTest::LargeSizeMatrixMPI(), CKNTBMS_Solver::Launching_TBMS_Solver(), CKNGeometricConstructionLaunch::LaunchingGeometricConstructionMPI(), CKNLanczosLaunching::LaunchingLanczos(), CKNLanczosMethod::MergeDegeneratedEigenvalues(), CKNGeometricShape::PeriodicUnitCellNumbering(), CKNLanczosMethod::SaveLanczosResult(), CKNGeometricConstructionLaunch::SetShapeInformation(), CKNGeometricShape::SetShapeInformation(), CKNLanczosMethod::ShowLanczosResult(), and CKNLanczosMethod::SortSolution().

218 {
219  bool bRtn = true;
220 
221 #ifdef DISABLE_MPI_ROUTINE
222  return bRtn;
223 #endif
224 
225  if (0 == GetCurrentRank())
226  return bRtn;
227 
228  bRtn = false;
229  return bRtn;
230 }
static int GetCurrentRank()
Definition: KNMPIManager.h:42

Here is the call graph for this function:

Here is the caller graph for this function:
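
A sketch of the common guard pattern used by the callers listed above (the message is illustrative):

#include <cstdio>
#include "KNMPIManager.h"

void ReportProgress(int nIteration)
{
    // Only the global root rank writes to stdout; all other ranks stay silent.
    if (CKNMPIManager::IsRootRank())
        std::printf("Lanczos iteration %d finished\n", nIteration);
}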

bool CKNMPIManager::IsRootRank ( MPI_Comm  comm)
static

Check this node is root rank in 'comm' MPI_Comm.

Parameters
comm  MPI_Comm
Returns
Root rank or not in 'comm' MPI_Comm

Definition at line 236 of file KNMPIManager.cpp.

References GetCurrentRank().

237 {
238  if (MPI_COMM_NULL == comm)
239  return true;
240 
241  if( 0 == GetCurrentRank(comm) )
242  return true;
243  else
244  return false;
245 }
static int GetCurrentRank()
Definition: KNMPIManager.h:42

Here is the call graph for this function:

void CKNMPIManager::LoadBlancing ( int  nElementCount)
static

Load balancing for MPI; this function is for Lanczos solving with geometric construction.

Parameters
nElementCount  Load balancing count
Remarks
This function is for Lanczos solving with geometric construction.

Definition at line 189 of file KNMPIManager.cpp.

References m_mpiCommIndex, m_nTotalNode, and m_pLoadBalance.

Referenced by CKNTBMS_Solver::AllocateCSR().

190 {
191  m_pLoadBalance = (int *)malloc(sizeof(int)*(m_nTotalNode)); // For communication size
192 
193 #ifdef DISABLE_MPI_ROUTINE
194  return;
195 #endif
196 
197  MPI_Allgather(&nElementCount, 1, MPI_INTEGER, m_pLoadBalance, 1, MPI_INTEGER, m_mpiCommIndex);
198 }
static int m_nTotalNode
Total node count.
Definition: KNMPIManager.h:88
static int * m_pLoadBalance
Load blancing for MPI Communication.
Definition: KNMPIManager.h:90
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101

Here is the caller graph for this function:

void CKNMPIManager::LoadBlancingForLanczos ( int  nRowCount)
static

Load balancing for MPI; this function is only for Lanczos solving without geometric construction.

Parameters
nRowCount  Matrix row count
Remarks
This function is only for Lanczos solving without geometric construction. Written by Sungkun Shin.

Definition at line 150 of file KNMPIManager.cpp.

References m_nTotalNode, m_pLoadBalance, and CKNIPCCUtility::ShowMsg().

Referenced by CKNLanczosTest::COmpareWIthMatLabSeOrthMPI(), CKNLanczosTest::LargeSizeMatrixMPI(), and CKNGeometricConstructionLaunch::LaunchingGeometricConstructionMPI().

151 {
152  int i, j, temp;
153  m_pLoadBalance = (int *)malloc(sizeof(int)*(m_nTotalNode)); // For communication size
154  m_pLoadBalance[0] = nRowCount;
155 
156 #ifdef DISABLE_MPI_ROUTINE
157  return;
158 #endif
159 
160  if (nRowCount < 10 * m_nTotalNode){
161  CKNIPCCUtility::ShowMsg("ERROR :: matrix dimension < 10 * number of CPU for Lanczos "); // Error out
162  }
163 
164  if (0 == (nRowCount / 10) % m_nTotalNode){
165  for (i = 0; i<m_nTotalNode; i++){
166  m_pLoadBalance[i] = nRowCount / m_nTotalNode;
167  }
168  }
169  else{
170  temp = nRowCount;
171  for (i = m_nTotalNode - 1; i>-1; i--){
172  m_pLoadBalance[i] = ((int)(nRowCount / m_nTotalNode / 10 + 1)) * 10;
173  temp -= m_pLoadBalance[i];
174  if ((temp / 10) % i == 0){
175  for (j = 0; j < i; j++){
176  m_pLoadBalance[j] = temp / i;
177  }
178  temp = 0;
179  break;
180  }
181  }
182  }
183 }
static void ShowMsg(char *pszBuffer)
Show message.
static int m_nTotalNode
Total node count.
Definition: KNMPIManager.h:88
static int * m_pLoadBalance
Load blancing for MPI Communication.
Definition: KNMPIManager.h:90

Here is the call graph for this function:

Here is the caller graph for this function:
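
A sketch of the call order suggested by the callers above: distribute the matrix rows first, then build the communication metadata that depends on the per-rank counts (the row count is supplied by the caller):

#include "KNMPIManager.h"

void PrepareDistribution(int nMatrixRowCount)
{
    // Every rank ends up with the same per-rank row counts (multiples of 10 where possible).
    CKNMPIManager::LoadBlancingForLanczos(nMatrixRowCount);

    // Allocate the receive/send/displacement arrays used later by MergeVector and friends.
    CKNMPIManager::InitCommunicationBufferMetric();
}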

void CKNMPIManager::MergeVector ( CKNMatrixOperation::CKNVector pVector,
CKNMatrixOperation::CKNVector pResultVector,
unsigned int  nMergeSize 
)
static

Merge vector to sub rank.

Parameters
pVector  Vector for sharing
[out] pResultVector  Vector for saving the merged result
nMergeSize  Vector size after merging

Definition at line 276 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, FREE_MEM, GetCurrentLoadBalanceCount(), m_mpiCommIndex, m_pDispls, m_pRecvCount, CKNMatrixOperation::CKNVector::m_vectValueImaginaryBuffer, CKNMatrixOperation::CKNVector::m_vectValueRealBuffer, CKNTimeMeasurement::MeasurementEnd(), and CKNTimeMeasurement::MeasurementStart().

Referenced by CKNMatrixOperation::MVMul().

277 {
278  LPCOMPLEX_NUMBER lpSendBuffer = NULL;
279  unsigned int i;
280 
281 #ifdef DISABLE_MPI_ROUTINE
282  return;
283 #endif
284 
285 
286  double *pBuffer = (double*)malloc(sizeof(double)*nMergeSize);
288  MPI_Allgatherv(pVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(), MPI_DOUBLE,
289  pBuffer, m_pRecvCount, m_pDispls, MPI_DOUBLE, m_mpiCommIndex);
290 
291  for (i = 0; i < nMergeSize; i++)
292  pResultVector->m_vectValueRealBuffer[i] = pBuffer[i];
293 
294  MPI_Allgatherv(pVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(), MPI_DOUBLE,
295  pBuffer, m_pRecvCount, m_pDispls, MPI_DOUBLE, m_mpiCommIndex);
296 
297  for (i = 0; i < nMergeSize; i++)
298  pResultVector->m_vectValueImaginaryBuffer[i] = pBuffer[i];
299 
300  FREE_MEM(pBuffer);
301 
303 
304 }
static int GetCurrentLoadBalanceCount()
Get Current node's rank load balancing number.
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static int * m_pRecvCount
Reciving count variable for MPI comminication.
Definition: KNMPIManager.h:93
double_vector_t m_vectValueImaginaryBuffer
A member variable for saving none zero elements.
static int * m_pDispls
Displ for MPI comminication.
Definition: KNMPIManager.h:96
double_vector_t m_vectValueRealBuffer
A member variable for saving none zero elements.
#define FREE_MEM(pointer)
Macro for memory allocation and assign null value.
Definition: CKNGlobal.h:20
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
struct CKNMPIManager::COMPLEX_NUMBER * LPCOMPLEX_NUMBER

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::MergeVectorEx_Optimal ( CKNMatrixOperation::CKNVector pSrcVector,
CKNMatrixOperation::CKNVector pResultVector,
unsigned int  nMergeSize,
double  fFirstIndex,
unsigned int  nSizeFromPrevRank,
unsigned int  nSizeFromNextRank,
unsigned int  nSizetoPrevRank,
unsigned int  nSizetoNextRank,
unsigned int *  mPos 
)
static

Merge vector for 1 layer exchanging.

Parameters
pSrcVector  Vector for sharing
[out] pResultVector  Vector for saving the merged result
nMergeSize  Vector size after merging
fFirstIndex  First index of the local vector
nSizeFromPrevRank  Exchange size from the previous node
nSizeFromNextRank  Exchange size from the next node
nSizetoPrevRank  Exchange size to the previous node
nSizetoNextRank  Exchange size to the next node
mPos  Start indices of the previous, local, and next node blocks

Definition at line 317 of file KNMPIManager.cpp.

References GetCurrentLoadBalanceCount(), GetLoadBalanceCount(), m_mpiCommIndex, m_nCurrentRank, m_nTotalNode, m_pDispls, m_pRecvCount, CKNMatrixOperation::CKNVector::m_vectValueImaginaryBuffer, CKNMatrixOperation::CKNVector::m_vectValueRealBuffer, CKNTimeMeasurement::MeasurementEnd(), CKNTimeMeasurement::MeasurementStart(), and CKNTimeMeasurement::MV_COMM.

Referenced by CKNMatrixOperation::MVMulEx_Optimal().

318 {
319  if (m_nTotalNode <= 3)
320  {
322  MPI_Allgatherv(pSrcVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(), MPI_DOUBLE, pResultVector->m_vectValueRealBuffer.data(), m_pRecvCount, m_pDispls, MPI_DOUBLE, m_mpiCommIndex);
323  MPI_Allgatherv(pSrcVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(), MPI_DOUBLE, pResultVector->m_vectValueImaginaryBuffer.data(), m_pRecvCount, m_pDispls, MPI_DOUBLE, m_mpiCommIndex);
325  mPos[0] = -1; mPos[1] = -1; mPos[2] = -1;
326  }
327  else
328  {
329  long long fCurrentRankPos = fFirstIndex;
330  int nPrevRank = (m_nCurrentRank - 1 + m_nTotalNode) % m_nTotalNode;
331  int nNextRank = (m_nCurrentRank + 1) % m_nTotalNode;
332  long long fPrevRankPos = -1, fNextRankPos = -1;
333  double *pSendBuffer = NULL, *pRecvBuffer = NULL;
334  MPI_Request req[2];
335  MPI_Status status[2];
336 
337  if (0 == m_nCurrentRank)
338  fPrevRankPos = nMergeSize - nSizeFromPrevRank;
339  else
340  fPrevRankPos = fFirstIndex - nSizeFromPrevRank;
341 
342  if (m_nCurrentRank == m_nTotalNode - 1)
343  fNextRankPos = 0;
344  else
345  fNextRankPos = fFirstIndex + GetLoadBalanceCount(m_nCurrentRank);
346 
347  mPos[0] = (unsigned int)fPrevRankPos; mPos[1] = (unsigned int)fCurrentRankPos; mPos[2] = (unsigned int)fNextRankPos;
348 
349  //printf("Rank %d: myload=%d, nSizeFromPrevRank=%d, nSizeFromNextRank=%d, nSizetoPrevRank=%d, nSizetoNextRank=%d, fPrevRankPos=%d, fNextRankPos=%d\n", m_nCurrentRank, GetLoadBalanceCount(m_nCurrentRank), nSizeFromPrevRank, nSizeFromNextRank, nSizetoPrevRank, nSizetoNextRank, fPrevRankPos, fNextRankPos);
350 
351  pSendBuffer = pSrcVector->m_vectValueRealBuffer.data();
352  pRecvBuffer = pResultVector->m_vectValueRealBuffer.data();
353 
355  MPI_Irecv(pRecvBuffer + fPrevRankPos, nSizeFromPrevRank, MPI_DOUBLE, nPrevRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
356  MPI_Isend(pSendBuffer + GetLoadBalanceCount(m_nCurrentRank) - nSizetoNextRank, nSizetoNextRank, MPI_DOUBLE, nNextRank, nNextRank, m_mpiCommIndex, &req[1]);
357  MPI_Waitall(2, req, status);
358 
359  MPI_Irecv(pRecvBuffer + fNextRankPos, nSizeFromNextRank, MPI_DOUBLE, nNextRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
360  MPI_Isend(pSendBuffer, nSizetoPrevRank, MPI_DOUBLE, nPrevRank, nPrevRank, m_mpiCommIndex, &req[1]);
361  MPI_Waitall(2, req, status);
363 
364  pSendBuffer = pSrcVector->m_vectValueImaginaryBuffer.data();
365  pRecvBuffer = pResultVector->m_vectValueImaginaryBuffer.data();
366 
368  MPI_Irecv(pRecvBuffer + fPrevRankPos, nSizeFromPrevRank, MPI_DOUBLE, nPrevRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
369  MPI_Isend(pSendBuffer + GetLoadBalanceCount(m_nCurrentRank) - nSizetoNextRank, nSizetoNextRank, MPI_DOUBLE, nNextRank, nNextRank, m_mpiCommIndex, &req[1]);
370  MPI_Waitall(2, req, status);
371 
372  MPI_Irecv(pRecvBuffer + fNextRankPos, nSizeFromNextRank, MPI_DOUBLE, nNextRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
373  MPI_Isend(pSendBuffer, nSizetoPrevRank, MPI_DOUBLE, nPrevRank, nPrevRank, m_mpiCommIndex, &req[1]);
374  MPI_Waitall(2, req, status);
376 
377  memcpy(pResultVector->m_vectValueRealBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueRealBuffer.data(), GetLoadBalanceCount(m_nCurrentRank) * sizeof(double));
378  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueImaginaryBuffer.data(), GetLoadBalanceCount(m_nCurrentRank) * sizeof(double));
379 
380  }
381 }
static int GetCurrentLoadBalanceCount()
Get Current node's rank load balancing number.
static void MeasurementEnd(MEASUREMENT_INDEX index)
Measurement end for part.
static int GetLoadBalanceCount(int nRank)
static int * m_pRecvCount
Reciving count variable for MPI comminication.
Definition: KNMPIManager.h:93
static int m_nTotalNode
Total node count.
Definition: KNMPIManager.h:88
double_vector_t m_vectValueImaginaryBuffer
A member variable for saving none zero elements.
static int * m_pDispls
Displ for MPI comminication.
Definition: KNMPIManager.h:96
double_vector_t m_vectValueRealBuffer
A member variable for saving none zero elements.
static MPI_Comm m_mpiCommIndex
Lanczos Method MPI_Comm.
Definition: KNMPIManager.h:101
static void MeasurementStart(MEASUREMENT_INDEX index)
Measurement start for part.
static int m_nCurrentRank
Getting Lanczos group index.
Definition: KNMPIManager.h:83

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::MergeVectorOptimal ( CKNMatrixOperation::CKNVector pSrcVector,
CKNMatrixOperation::CKNVector pResultVector,
unsigned int  nMergeSize,
double  fFirstIndex 
)
static

Merge vector to sub rank, operated without vector class member function call.

Parameters
pSrcVector  Vector for sharing
[out] pResultVector  Vector for saving the merged result
nMergeSize  Vector size after merging
fFirstIndex  First index of the local vector

Definition at line 389 of file KNMPIManager.cpp.

References FREE_MEM, GetCurrentLoadBalanceCount(), GetLoadBalanceCount(), m_mpiCommIndex, m_nCurrentRank, m_nTotalNode, m_pDispls, m_pRecvCount, CKNMatrixOperation::CKNVector::m_vectValueImaginaryBuffer, CKNMatrixOperation::CKNVector::m_vectValueRealBuffer, CKNTimeMeasurement::MeasurementEnd(), CKNTimeMeasurement::MeasurementStart(), CKNTimeMeasurement::MV_COMM, CKNTimeMeasurement::MV_FREE_MEM, and CKNTimeMeasurement::MV_MALLOC.

Referenced by CKNMatrixOperation::MVMulOptimal().

390 {
391  if( m_nTotalNode <= 3 )
392  {
394  MPI_Allgatherv(pSrcVector->m_vectValueRealBuffer.data(), GetCurrentLoadBalanceCount(), MPI_DOUBLE,
395  pResultVector->m_vectValueRealBuffer.data(), m_pRecvCount, m_pDispls, MPI_DOUBLE, m_mpiCommIndex);
396  MPI_Allgatherv(pSrcVector->m_vectValueImaginaryBuffer.data(), GetCurrentLoadBalanceCount(), MPI_DOUBLE,
397  pResultVector->m_vectValueImaginaryBuffer.data(), m_pRecvCount, m_pDispls, MPI_DOUBLE, m_mpiCommIndex);
399  }
400  else
401  {
402  double fCurrentRankPos = fFirstIndex;
403  long long fPrevRankPos = -1, fNextRankPos = -1;
404  int nPrevRank = (m_nCurrentRank-1+m_nTotalNode)%m_nTotalNode;
405  int nNextRank = (m_nCurrentRank+1)%m_nTotalNode;
406  long long nMax;
407  double *pSendBuffer = NULL, *pRecvBuffer = NULL;
408  MPI_Request req[2];
409  MPI_Status status[2];
410 
411  if( 0 == m_nCurrentRank )
412  fPrevRankPos = nMergeSize - GetLoadBalanceCount(nPrevRank);
413  else
414  fPrevRankPos = fFirstIndex - GetLoadBalanceCount(nPrevRank);
415 
416  if (m_nCurrentRank == m_nTotalNode - 1)
417  fNextRankPos = 0;
418  else
419  fNextRankPos = fCurrentRankPos + GetCurrentLoadBalanceCount();
420 
421 #ifdef _WIN32
422  nMax = max(GetLoadBalanceCount(nPrevRank), GetLoadBalanceCount(nNextRank));
423 #else //_WIN32
424  nMax = std::max(GetLoadBalanceCount(nPrevRank), GetLoadBalanceCount(nNextRank));
425 #endif//
426 
428  pRecvBuffer = (double*)malloc(sizeof(double)*nMax*2);
429  pSendBuffer = (double*)malloc(sizeof(double)*GetLoadBalanceCount(m_nCurrentRank)*2);
431 
432 
433  memcpy(pSendBuffer, pSrcVector->m_vectValueRealBuffer.data(), sizeof(double)*GetLoadBalanceCount(m_nCurrentRank));
434  memcpy(pSendBuffer+ GetLoadBalanceCount(m_nCurrentRank), pSrcVector->m_vectValueImaginaryBuffer.data() , sizeof(double)*GetLoadBalanceCount(m_nCurrentRank));
435 
437  MPI_Irecv(pRecvBuffer, 2 * GetLoadBalanceCount(nPrevRank), MPI_DOUBLE, nPrevRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
438  MPI_Isend(pSendBuffer, 2 * GetLoadBalanceCount(m_nCurrentRank), MPI_DOUBLE, nNextRank, nNextRank, m_mpiCommIndex, &req[1]);
439  MPI_Waitall(2, req, status);
441 
442  memcpy(pResultVector->m_vectValueRealBuffer.data() + fPrevRankPos, pRecvBuffer, GetLoadBalanceCount(nPrevRank) * sizeof(double));
443  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + fPrevRankPos, pRecvBuffer + GetLoadBalanceCount(nPrevRank), GetLoadBalanceCount(nPrevRank) * sizeof(double));
444 
446  MPI_Irecv(pRecvBuffer, 2 * GetLoadBalanceCount(nNextRank), MPI_DOUBLE, nNextRank, m_nCurrentRank, m_mpiCommIndex, &req[0]);
447  MPI_Isend(pSendBuffer, 2 * GetLoadBalanceCount(m_nCurrentRank), MPI_DOUBLE, nPrevRank, nPrevRank, m_mpiCommIndex, &req[1]);
448  MPI_Waitall(2, req, status);
450  memcpy(pResultVector->m_vectValueRealBuffer.data() + fNextRankPos, pRecvBuffer, GetLoadBalanceCount(nNextRank) * sizeof(double));
451  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + fNextRankPos, pRecvBuffer + GetLoadBalanceCount(nNextRank), GetLoadBalanceCount(nNextRank) * sizeof(double));
452 
454  FREE_MEM(pRecvBuffer);
455  FREE_MEM(pSendBuffer);
457 
458  memcpy(pResultVector->m_vectValueRealBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueRealBuffer.data(), GetLoadBalanceCount(m_nCurrentRank) * sizeof(double));
459  memcpy(pResultVector->m_vectValueImaginaryBuffer.data() + (long long)fFirstIndex, pSrcVector->m_vectValueImaginaryBuffer.data(), GetLoadBalanceCount(m_nCurrentRank) * sizeof(double));
460  }
461 }
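A minimal usage sketch follows. It assumes the project's headers are available, that load balancing has already been set up (for example via LoadBlancingForLanczos()), and that the merged vector has already been sized to the full length; the function name MergeLocalSlice and its parameters are illustrative only.

// Sketch only: merge this rank's local slice into a full-length vector.
// Assumes LoadBlancingForLanczos() (or LoadBlancing()) has been called and
// that pMerged is already sized to nGlobalSize elements.
#include "KNMPIManager.h"

void MergeLocalSlice(CKNMatrixOperation::CKNVector *pLocal,
                     CKNMatrixOperation::CKNVector *pMerged,
                     unsigned int nGlobalSize,
                     double fFirstGlobalIndex)
{
    // fFirstGlobalIndex is the global index of the first local element.
    CKNMPIManager::MergeVectorOptimal(pLocal, pMerged,
                                      nGlobalSize, fFirstGlobalIndex);
}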

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::ReceiveDoubleBufferSync ( int  nSourceRank,
double *  pBuffer,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Receive a double data array synchronously (blocking).

Parameters
nSourceRank    Source rank index
pBuffer    Data buffer to receive into
nSize    Data buffer size
req    MPI request parameter (not used by the synchronous variant)
commWorld    Communicator to receive on; MPI_COMM_NULL selects the internal Lanczos communicator

Definition at line 770 of file KNMPIManager.cpp.

References GetCurrentRank(), and m_mpiCommIndex.

Referenced by CKNGeometricShape::ConstructMapInfo(), CKNIPCCUtility::DumpCSR(), CKNMatrixDebug::DumpCSR(), CKNMatrixDebug::DumpCSRBinary(), CKNIPCCUtility::DumpCSRBinary(), CKNGeometricShape::ExchangeAtomInfoBetweenNode(), and ReceiveVectorSync().

771 {
772  MPI_Status status;
773  if( MPI_COMM_NULL == commWorld)
774  //MPI_Recv(pBuffer, nSize, MPI_DOUBLE, nSourceRank, 0, m_mpiCommIndex, &status);
775  MPI_Recv(pBuffer, nSize, MPI_DOUBLE, nSourceRank, CKNMPIManager::GetCurrentRank(), m_mpiCommIndex, &status);
776  else
777  //MPI_Recv(pBuffer, nSize, MPI_DOUBLE, nSourceRank, 0, commWorld, &status);
778  MPI_Recv(pBuffer, nSize, MPI_DOUBLE, nSourceRank, CKNMPIManager::GetCurrentRank(commWorld), commWorld, &status);
779 }

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::ReceiveVectorSync ( int  nSourceRank,
CKNMatrixOperation::CKNVector *  pVector,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Receive a vector synchronously (blocking).

Parameters
nSourceRank    Source rank for receiving data
pVector    Receiving buffer
nSize    Receiving size (number of complex elements)
req    MPI_Request passed to the underlying receive
commWorld    MPI_Comm for receiving data

Definition at line 910 of file KNMPIManager.cpp.

References FREE_MEM, ReceiveDoubleBufferSync(), and CKNMatrixOperation::CKNVector::Serialize().

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

911 {
912  double *pBuffer = NULL;
913 
914  pBuffer = (double*)malloc(sizeof(double)*nSize*2);
915 
916  ReceiveDoubleBufferSync(nSourceRank, pBuffer, nSize * 2, req, commWorld);
917 
918  pVector->Serialize(pBuffer, true);
919 
920  FREE_MEM(pBuffer);
921 }

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::SendDoubleBufferSync ( int  nTargetRank,
double *  pBuffer,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Send a double data array synchronously (blocking).

Parameters
nTargetRank    Target rank index
pBuffer    Data buffer to send
nSize    Data buffer size
req    MPI request parameter (not used by the synchronous variant)
commWorld    Communicator to send on; MPI_COMM_NULL selects the internal Lanczos communicator

Definition at line 742 of file KNMPIManager.cpp.

References GetCurrentRank(), and m_mpiCommIndex.

Referenced by CKNGeometricShape::ConstructMapInfo(), CKNIPCCUtility::DumpCSR(), CKNMatrixDebug::DumpCSR(), CKNMatrixDebug::DumpCSRBinary(), CKNIPCCUtility::DumpCSRBinary(), CKNGeometricShape::ExchangeAtomInfoBetweenNode(), and SendVectorSync().

743 {
744  int nRank = CKNMPIManager::GetCurrentRank();
745  if( MPI_COMM_NULL == commWorld)
746  //MPI_Send(pBuffer, nSize, MPI_DOUBLE, nTargetRank, 0, m_mpiCommIndex);
747  MPI_Send(pBuffer, nSize, MPI_DOUBLE, nTargetRank, nTargetRank, m_mpiCommIndex);
748  else
749  if( MPI_SUCCESS != MPI_Send(pBuffer, nSize, MPI_DOUBLE, nTargetRank, nTargetRank, commWorld) )
750  //if( MPI_SUCCESS != MPI_Send(pBuffer, nSize, MPI_DOUBLE, nTargetRank, 0, commWorld) )
751  printf("Oh my god!\n");
752 }
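As the listings above show, the message tag is the receiver's rank (the sender tags with nTargetRank, the receiver with its own rank), so the two calls pair up when both sides agree on the ranks and the communicator. A minimal sketch, assuming the MPI environment has already been initialised; ExchangeExample and its parameters are illustrative only.

// Sketch only: rank 0 sends nSize doubles to rank 1 over MPI_COMM_WORLD.
#include <mpi.h>
#include "KNMPIManager.h"

void ExchangeExample(double *pData, int nSize)
{
    MPI_Request req;    // passed through; the synchronous variants do not use it

    if (0 == CKNMPIManager::GetCurrentRank(MPI_COMM_WORLD))
        CKNMPIManager::SendDoubleBufferSync(1, pData, nSize, &req, MPI_COMM_WORLD);
    else if (1 == CKNMPIManager::GetCurrentRank(MPI_COMM_WORLD))
        CKNMPIManager::ReceiveDoubleBufferSync(0, pData, nSize, &req, MPI_COMM_WORLD);
}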

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::SendVectorSync ( int  nTargetRank,
CKNMatrixOperation::CKNVector *  pVector,
int  nSize,
MPI_Request *  req,
MPI_Comm  commWorld = MPI_COMM_NULL 
)
static

Send a vector synchronously (blocking).

Parameters
nTargetRank    Sending target rank
pVector    Sending buffer
nSize    Sending size (number of complex elements)
req    MPI_Request passed to the underlying send
commWorld    MPI_Comm for sending data

Definition at line 890 of file KNMPIManager.cpp.

References FREE_MEM, SendDoubleBufferSync(), and CKNMatrixOperation::CKNVector::Serialize().

Referenced by CKNLanczosMethod::MergeDegeneratedEigenvalues().

891 {
892  double *pBuffer = NULL;
893 
894  pBuffer = (double*)malloc(sizeof(double)*nSize*2);
895  pVector->Serialize(pBuffer, false);
896 
897  SendDoubleBufferSync(nTargetRank, pBuffer, nSize * 2, req, commWorld);
898 
899 
900  FREE_MEM(pBuffer);
901 }
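Both vector helpers serialize the nSize complex elements into a temporary buffer of 2*nSize doubles before delegating to the raw-buffer routines above, so sender and receiver must agree on the element count. A minimal pairing sketch, assuming the default internal communicator and that the receiving vector has already been sized; TransferVector is an illustrative name.

// Sketch only: move a CKNVector of nElementCount complex entries from
// rank 0 to rank 1 over the internal (default) communicator.
#include <mpi.h>
#include "KNMPIManager.h"

void TransferVector(CKNMatrixOperation::CKNVector *pVector, int nElementCount)
{
    MPI_Request req;    // unused by the synchronous variants

    if (0 == CKNMPIManager::GetCurrentRank())
        CKNMPIManager::SendVectorSync(1, pVector, nElementCount, &req);
    else if (1 == CKNMPIManager::GetCurrentRank())
        CKNMPIManager::ReceiveVectorSync(0, pVector, nElementCount, &req);
}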

Here is the call graph for this function:

Here is the caller graph for this function:

void CKNMPIManager::SetMPIEnviroment ( int  nRank,
int  nTotalNode 
)
static

Set the MPI environment.

Parameters
nRank    Current rank index
nTotalNode    Total rank count

Definition at line 139 of file KNMPIManager.cpp.

References m_bStartMPI, m_nCurrentRank, and m_nTotalNode.

Referenced by CKNLanczosTest::COmpareWIthMatLabSeOrthMPI(), InitLevel(), CKNTBMS_Solver::InitMPIEnv(), CKNLanczosTest::LargeSizeMatrixMPI(), CKNGeometricConstructionLaunch::LaunchingGeometricConstructionMPI(), and CKNLanczosLaunching::LaunchingLanczos().

140 {
141  m_nCurrentRank = nRank;
142  m_nTotalNode = nTotalNode;
143  m_bStartMPI = true;
144 }
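A minimal initialisation sketch follows, showing how the rank and node count obtained from a standard MPI startup are handed to the manager; it is illustrative only and omits the solver-specific setup (InitLevel(), load balancing, and so on).

// Sketch only: standard MPI startup feeding SetMPIEnviroment().
#include <mpi.h>
#include "KNMPIManager.h"

int main(int argc, char *argv[])
{
    int nRank = 0, nTotalNode = 1;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &nRank);
    MPI_Comm_size(MPI_COMM_WORLD, &nTotalNode);

    // Record the rank and node count so the static helpers can use them.
    CKNMPIManager::SetMPIEnviroment(nRank, nTotalNode);

    // ... solver work ...

    MPI_Finalize();
    return 0;
}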

Here is the caller graph for this function:

void CKNMPIManager::SplitVector ( CKNMatrixOperation::CKNVector *  pVector,
int  nRootRank 
)
static

Split (scatter) a vector to the sub ranks.

Parameters
pVector    Vector to be shared
nRootRank    Root rank index

Definition at line 467 of file KNMPIManager.cpp.

References CKNTimeMeasurement::COMM, ConvertVectorToMPIComplexBuffer(), ERROR_MALLOC, CKNMPIManager::COMPLEX_NUMBER::fImginary, CKNMatrixOperation::CKNVector::Finalize(), CKNMPIManager::COMPLEX_NUMBER::fReal, FREE_MEM, CKNTimeMeasurement::FREE_MEM, GetCurrentLoadBalanceCount(), GetCurrentRank(), GetLoadBalanceCount(), GetTotalNodeCount(), m_mpiCommIndex, CKNTimeMeasurement::MALLOC, CKNTimeMeasurement::MeasurementEnd(), CKNTimeMeasurement::MeasurementStart(), CKNMatrixOperation::CKNVector::SetAt(), and CKNMatrixOperation::CKNVector::SetSize().

468 {
469  LPCOMPLEX_NUMBER lpSendBuffer = NULL;
470  LPCOMPLEX_NUMBER lpRecvBuffer = NULL;
471  unsigned int i;
472  int *pSendCounts = NULL, *pDispls = NULL;
473 
474 #ifdef DISABLE_MPI_ROUTINE
475  return;
476 #endif
477 
479  pSendCounts = (int*)malloc(sizeof(int)*GetTotalNodeCount());
480  pDispls = (int*)malloc(sizeof(int)*GetTotalNodeCount());
482 
483  pDispls[0] = 0;
484  pSendCounts[0] = 2 * GetLoadBalanceCount(0);
485  for (i = 1; i < (unsigned int)GetTotalNodeCount(); i++)
486  {
487  pSendCounts[i] = 2 * GetLoadBalanceCount(i);
488  pDispls[i] = pDispls[i - 1] + 2 * GetLoadBalanceCount(i - 1);
489  }
490 
491  if (nRootRank == GetCurrentRank())
492  {
493  lpSendBuffer = ConvertVectorToMPIComplexBuffer(pVector);
494  pVector->Finalize();
495  }
496 
498  lpRecvBuffer = (LPCOMPLEX_NUMBER)malloc(sizeof(COMPLEX_NUMBER)*GetCurrentLoadBalanceCount() * 2);
500 
501  if (NULL == lpRecvBuffer)
502  throw ERROR_MALLOC;
503 
505  MPI_Scatterv(lpSendBuffer, pSendCounts, pDispls, MPI_DOUBLE,
506  lpRecvBuffer, 2 * GetCurrentLoadBalanceCount(), MPI_DOUBLE, nRootRank, m_mpiCommIndex);
508 
510  for (i = 0; i < (unsigned int)GetCurrentLoadBalanceCount(); i++)
511  pVector->SetAt(i, lpRecvBuffer[i].fReal, lpRecvBuffer[i].fImginary);
512 
514  FREE_MEM(pSendCounts);
515  FREE_MEM(pDispls);
516  FREE_MEM(lpRecvBuffer);
518 }
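A minimal usage sketch, assuming load balancing has already been set up so that GetCurrentLoadBalanceCount() and GetLoadBalanceCount() return valid slice sizes; DistributeInitialVector is an illustrative name.

// Sketch only: scatter a full vector held on the root rank so that each
// rank keeps only its load-balanced slice of the data.
#include "KNMPIManager.h"

void DistributeInitialVector(CKNMatrixOperation::CKNVector *pVector)
{
    const int nRootRank = 0;

    // On the root rank pVector holds the full vector; on the other ranks
    // it only needs room for GetCurrentLoadBalanceCount() elements.
    CKNMPIManager::SplitVector(pVector, nRootRank);
}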

Here is the call graph for this function:

void CKNMPIManager::WaitReceiveDoubleBufferAsync ( MPI_Request *  req)
static

Wait for an asynchronous receive of a double buffer to complete.

Parameters
req    MPI request parameter (the implementation waits on the internal m_ReceiveDoubleAsyncRequest)

Definition at line 784 of file KNMPIManager.cpp.

References m_ReceiveDoubleAsyncRequest.

785 {
786  MPI_Status status;
787 
788  MPI_Wait(&m_ReceiveDoubleAsyncRequest, &status);
789 }
void CKNMPIManager::WaitSendDoubleBufferSync ( MPI_Request *  req)
static

Wait for an asynchronous send of a double buffer to complete.

Parameters
req    MPI request parameter (the implementation waits on the internal m_SendDoubleAsyncRequest)

Definition at line 757 of file KNMPIManager.cpp.

References m_SendDoubleAsyncRequest.

758 {
759  MPI_Status status;
760 
761  MPI_Wait(&m_SendDoubleAsyncRequest, &status);
762 }

Member Data Documentation

bool CKNMPIManager::m_bMultiLevel = false
staticprivate

Flag for Multilevel MPI group.

Definition at line 106 of file KNMPIManager.h.

Referenced by InitLevel(), and IsMultiLevelMPI().

bool CKNMPIManager::m_bNeedPostOperation = { false, false, false, false, false, false, false, false, false, false }
staticprivate

Flags indicating whether a post-operation is needed at each MPI level.

Definition at line 100 of file KNMPIManager.h.

Referenced by InitLevel().

bool CKNMPIManager::m_bStartMPI = false
staticprivate

MPI_Init call or not.

Definition at line 89 of file KNMPIManager.h.

Referenced by FinalizeManager(), IsInMPIRoutine(), and SetMPIEnviroment().

MPI_Comm CKNMPIManager::m_deflationComm = MPI_COMM_NULL
staticprivate
MPI_Group CKNMPIManager::m_deflationGroup = MPI_GROUP_EMPTY
staticprivate

MPI Group for Deflation computation.

Definition at line 104 of file KNMPIManager.h.

Referenced by FinalizeManager(), and InitLevel().

MPI_Group CKNMPIManager::m_lanczosGroup = MPI_GROUP_EMPTY
staticprivate

MPI Group for Lanczos computation.

Definition at line 103 of file KNMPIManager.h.

Referenced by FinalizeManager(), and InitLevel().

int CKNMPIManager::m_nCommWorldRank = 0
staticprivate

MPI Rank before split.

Definition at line 87 of file KNMPIManager.h.

Referenced by InitLevel().

int CKNMPIManager::m_nCurrentRank = 0
staticprivate

MPI rank of the current process.

Definition at line 83 of file KNMPIManager.h.

Referenced by FinalizeManager(), GetCurrentLoadBalanceCount(), GetCurrentRank(), MergeVectorEx_Optimal(), MergeVectorOptimal(), and SetMPIEnviroment().

unsigned int CKNMPIManager::m_nLanczosGroupIndex = 0
staticprivate

MPI Group index for Lanczos group.

Definition at line 105 of file KNMPIManager.h.

Referenced by InitLevel().

unsigned int CKNMPIManager::m_nMPILevel = 1
staticprivate

MPI Level.

Definition at line 99 of file KNMPIManager.h.

int CKNMPIManager::m_nTotalNode = 1
staticprivate
int * CKNMPIManager::m_pBankInfo = NULL
staticprivate

Bank information after the MPI split.

Definition at line 95 of file KNMPIManager.h.

Referenced by FinalizeManager().

CKNMPIManager::LPCOMPLEX_NUMBER CKNMPIManager::m_pCommBuffer = NULL
staticprivate

Data buffer for MPI communication.

Definition at line 91 of file KNMPIManager.h.

CKNMPIManager::LPCOMPLEX_NUMBER CKNMPIManager::m_pConvertingBuffer = NULL
staticprivate

Data buffer for Vector converting.

Definition at line 92 of file KNMPIManager.h.

Referenced by ConvertVectorToMPIComplexBuffer().

int * CKNMPIManager::m_pDispls = NULL
staticprivate

Displacements for MPI communication.

Definition at line 96 of file KNMPIManager.h.

Referenced by FinalizeManager(), InitCommunicationBufferMetric(), MergeVector(), MergeVectorEx_Optimal(), and MergeVectorOptimal().

int * CKNMPIManager::m_pLoadBalance = NULL
staticprivate

Load balancing array for MPI communication.

Definition at line 90 of file KNMPIManager.h.

Referenced by FinalizeManager(), GetCurrentLoadBalanceCount(), GetLoadBalanceCount(), LoadBlancing(), and LoadBlancingForLanczos().

int * CKNMPIManager::m_pRecvCount = NULL
staticprivate

Receive count array for MPI communication.

Definition at line 93 of file KNMPIManager.h.

Referenced by FinalizeManager(), InitCommunicationBufferMetric(), MergeVector(), MergeVectorEx_Optimal(), and MergeVectorOptimal().

int * CKNMPIManager::m_pSendCount = NULL
staticprivate

Send count array for MPI communication.

Definition at line 94 of file KNMPIManager.h.

Referenced by FinalizeManager(), and InitCommunicationBufferMetric().

MPI_Request CKNMPIManager::m_ReceiveDoubleAsyncRequest = MPI_REQUEST_NULL
staticprivate

Request for receiving double data.

Definition at line 98 of file KNMPIManager.h.

Referenced by WaitReceiveDoubleBufferAsync().

MPI_Request CKNMPIManager::m_SendDoubleAsyncRequest = MPI_REQUEST_NULL
staticprivate

Request for sending double data.

Definition at line 97 of file KNMPIManager.h.

Referenced by WaitSendDoubleBufferSync().


The documentation for this class was generated from the following files: