// NOTE(review): diff-viewer residue ("Newer" / "Older") was captured at the
// top of this chunk; these tokens are not valid C++ and must be removed.
if( endStream != MacroGridMoverIF :: ENDSTREAM )
{
cerr << "**ERROR: writeStaticState: inconsistent stream " << endl;
assert( false );
abort();
}
}
}
catch (Parallel :: AccessPllException)
{
// NOTE(review): a column of stray line numbers (1012-1034) from a
// diff/extraction tool was embedded here; it is not code and has been
// replaced by this comment.
cerr << " FEHLER Parallel :: AccessPllException entstanden in: " << __FILE__ << " " << __LINE__ << endl ;
}
assert (debugOption (20) ? (cout << "**INFO GitterPll :: exchangeDynamicState () used "
<< (float)(clock () - start)/(float)(CLOCKS_PER_SEC) << " sec. " << endl, 1) : 1 ) ;
}
{
//struct mallinfo minfo = mallinfo();
//cerr << "Ende exchangeDynamicState(): Blocks allocated: " << minfo.usmblks + minfo.uordblks << " "
// << " Blocks used: " << minfo.usmblks + minfo.uordblks - mallocedsize << endl;
}
return ;
}
void GitterPll :: exchangeStaticState () {
// This method is invoked every time the static state (i.e. the state
// associated with the macro grid) has changed: macro grid construction
// and load distribution. The static state must not be affected by
// refinement or by higher-level methods.
// NOTE(review): the body below appears to be corrupted merge/diff
// residue -- braces are unbalanced, the inner/outer handle loops are
// duplicated, and `endStream` is compared without ever being read from
// a stream. Restore this function from version control before use.
const int start = clock () ;
try {
// one object stream per communication link to the neighbor processes
const int nl = mpAccess ().nlinks () ;
vector < ObjectStream > osv (nl) ;
{
// NOTE(review): this for-loop has no braces, so only the declaration
// of `wi` is inside it; the following line uses `l` out of scope.
for (int l = 0 ; l < nl ; ++l)
AccessIteratorTT < hface_STI > :: InnerHandle wi (containerPll (),l) ;
AccessIteratorTT < hface_STI > :: OuterHandle wo (containerPll (),l) ;
for (wi.first () ; ! wi.done () ; wi.next ())
{
pair < ElementPllXIF_t *, int > p = wi.item ().accessInnerPllX () ;
}
for (wo.first () ; ! wo.done () ; wo.next ())
{
pair < ElementPllXIF_t *, int > p = wo.item ().accessInnerPllX () ;
// all-to-all exchange of the object streams over the links
osv = mpAccess ().exchange (osv) ;
{
for (int l = 0 ; l < nl ; ++l)
{
AccessIteratorTT < hface_STI > :: InnerHandle wi (containerPll (),l) ;
AccessIteratorTT < hface_STI > :: OuterHandle wo (containerPll (),l) ;
for (wo.first () ; ! wo.done () ; wo.next ())
{
pair < ElementPllXIF_t *, int > p = wo.item ().accessOuterPllX () ;
}
for (wi.first () ; ! wi.done () ; wi.next ())
{
pair < ElementPllXIF_t *, int > p = wi.item ().accessOuterPllX () ;
// check consistency of stream
// NOTE(review): `endStream` is uninitialized here -- presumably a
// `readObject`/stream read was lost in the merge; confirm against
// the original sources.
int endStream ;
if( endStream != MacroGridMoverIF :: ENDSTREAM )
{
if( endStream != MacroGridMoverIF :: ENDSTREAM )
{
cerr << "**ERROR: writeStaticState: inconsistent stream, got " << endStream << endl;
assert( false );
abort();
}
}
}
}
}
catch (Parallel :: AccessPllException)
{
// report where the exception originated ("FEHLER" is German for error)
cerr << " FEHLER Parallel :: AccessPllException entstanden in: " << __FILE__ << " " << __LINE__ << endl ;
}
// timing report only when debug option 20 is set; the comma-operator
// trick keeps the assert free of side effects in release builds
assert (debugOption (20) ? (cout << "**INFO GitterPll :: exchangeStaticState () used "
<< (float)(clock () - start)/(float)(CLOCKS_PER_SEC) << " sec. " << endl, 1) : 1 ) ;
return ;
}
bool GitterPll :: checkPartitioning( LoadBalancer :: DataBase& db )
// NOTE(review): the opening brace of this function is missing and the
// comment block starting with /* below is never closed, so the decision
// logic that sets `neu` is effectively commented out / garbled.
// Restore this function from version control before use.
assert (debugOption (20) ? (cout << "**GitterPll :: checkPartitioning ( db ) " << endl, 1) : 1) ;
{
// feed the graph edges (faces) into the load-balancer data base
AccessIterator < hface_STI > :: Handle w (containerPll ()) ;
for (w.first () ; ! w.done () ; w.next ()) w.item ().ldbUpdateGraphEdge (db) ;
}
{
// feed the graph vertices (elements) into the load-balancer data base
AccessIterator < helement_STI > :: Handle w (containerPll ()) ;
for (w.first () ; ! w.done () ; w.next ()) w.item ().ldbUpdateGraphVertex (db) ;
const int np = mpAccess ().psize () ;
bool neu = false ;
{
// Criterion for deciding when a load re-balancing is due:
//
// load - own load
// mean - mean load of elements
// minload - minmal load
// maxload - maximal load
const double myload = db.accVertexLoad () ;
// get: min(myload), max(myload), sum(myload)
MpAccessLocal :: minmaxsum_t load = mpAccess ().minmaxsum( myload );
// get mean value of leaf elements
const double mean = load.sum / double( np );
/*
// old version using Allgather
vector < double > v (mpAccess ().gcollect (load)) ;
const vector < double > :: iterator iEnd = v.end () ;
// sum up values and devide by number of cores
const double mean = accumulate (v.begin (), v.end (), 0.0) / double (np) ;
std::cout << mean << " mean value " << std::endl;
for (vector < double > :: iterator i = v.begin () ; i != iEnd ; ++i)
neu |= (*i > mean ? (*i > (_ldbOver * mean) ? true : false) : (*i < (_ldbUnder * mean) ? true : false)) ;
//std::cout << mean << " mean value " << minload << " minload " << maxload << std::endl;
// NOTE(review): the if below has no visible body (presumably
// `neu = true ;` was lost); also the comment block above is unclosed.
if( load.max > (_ldbOver * mean) || load.min < (_ldbUnder * mean) )
#ifndef NDEBUG
// make sure every process has the same value of neu
const bool checkNeu = mpAccess().gmax( neu );
assert( neu == checkNeu );
#endif
return neu;
}
void GitterPll :: loadBalancerGridChangesNotify ()
{
// create load balancer data base
LoadBalancer :: DataBase db ;
// check whether we have to repartition
const bool neu = checkPartitioning( db );
// if repartioning necessary, do it
const int ldbMth = int( _ldbMethod );
#ifndef NDEBUG
// make sure every process has the same ldb method
int checkMth = mpAccess ().gmax( ldbMth );
assert( checkMth == ldbMth );
#endif
// NOTE(review): the opening brace after this if is missing, and the
// condition uses `ldbMth` where `neu` (the repartitioning decision)
// would be expected -- confirm against the original sources.
if( ldbMth )
// dump the grid before and after repartitioning for inspection
tovtk("pre.vtu");
repartitionMacroGrid (db) ;
tovtk("post.vtu");
notifyMacroGridChanges () ;
}
// NOTE(review): the trailing brace/return/brace below are stray merge
// residue that leave this function's braces unbalanced.
}
return ;
}
void GitterPll :: loadBalancerMacroGridChangesNotify ()
{
  // Reaction of the load balancer (and its underlying data base) to
  // changes of the coarse (macro) grid: changes in the macro grid
  // distribution, its size, and so on.
  assert (debugOption (20) ? (cout << "**INFO GitterPll :: loadBalancerMacroGridChangesNotify () " << endl, 1) : 1) ;
  AccessIterator < helement_STI > :: Handle w ( containerPll () ) ;
  // number of macro elements held by this process
  const int macroElements = w.size () ;
  // exclusive prefix sum: the inclusive scan minus the own contribution
  // yields the number of macro elements on all lower-ranked processes
  int cnt = mpAccess ().scan( macroElements ) - macroElements ;
#ifndef NDEBUG
  // cross-check the scan result against an explicit all-gather
  //std::cout << "P[ " << mpAccess().myrank() << " ] cnt = " << cnt << std::endl;
  {
    int oldcnt = 0;
    // collect the per-process sizes
    vector < int > sizes = mpAccess ().gcollect ( macroElements ) ;
    // accumulate the sizes of all ranks below mine
    const int me = mpAccess ().myrank () ;
    for (int i = 0 ; i < me ; ++ i)
      oldcnt += sizes [ i ] ;
    assert( oldcnt == cnt );
  }
#endif
  // hand out consecutive global ldb vertex indices to the macro elements
  for (w.first () ; ! w.done () ; w.next ())
  {
    w.item ().setLoadBalanceVertexIndex ( cnt ++ ) ;
  }
  return ;
}
void GitterPll :: notifyGridChanges ()
{
  // Propagate a (dynamic) grid change: first run the serial
  // bookkeeping of the base class, then refresh the dynamic state
  // across the process boundaries.
  assert (debugOption (20) ? (cout << "**INFO GitterPll :: notifyGridChanges () " << endl, 1) : 1 ) ;
  Gitter :: notifyGridChanges () ;
  exchangeDynamicState () ;
  return ;
}
void GitterPll :: notifyMacroGridChanges ()
{
  // A macro grid change requires a full rebuild of the parallel state:
  // serial notifications, re-identification of the entities shared
  // across the links, load-balancer bookkeeping, and finally both the
  // static and the dynamic state exchange.
  assert (debugOption (20) ? (cout << "**INFO GitterPll :: notifyMacroGridChanges () " << endl, 1) : 1 ) ;
  Gitter :: notifyMacroGridChanges () ;
  Gitter :: notifyGridChanges () ;
  // re-establish the parallel identification of shared entities
  containerPll ().identification (mpAccess ()) ;
  // renumber the load-balancer graph vertices
  loadBalancerMacroGridChangesNotify () ;
  exchangeStaticState () ;
  exchangeDynamicState () ;
  return ;
}
GitterPll :: GitterPll ( MpAccessLocal & mpa )
: _ldbOver (0.0), _ldbUnder (0.0), _ldbMethod (LoadBalancer :: DataBase :: NONE)
// NOTE(review): this constructor body is corrupted -- the opening brace
// after the initializer list is missing, `i` is used at `in >> i`
// without a declaration, and the if/else braces do not match. Restore
// from version control before use.
// Only rank 0 reads the configuration file; the values are then
// broadcast to all other processes below.
if( mpa.myrank() == 0 )
// set default values
_ldbOver = 1.2;
_ldbMethod = LoadBalancer :: DataBase :: METIS_PartGraphKway ;
// optional config file overriding the balancing thresholds and method
ifstream in ("alugrid.cfg") ;
if (in)
in >> _ldbUnder ;
in >> _ldbOver ;
in >> i;
_ldbMethod = (LoadBalancer :: DataBase :: method) i ;
}
else
{
cerr << endl << "**WARNING (ignored) could'nt open file "
<< "Using default values: " << endl ;
cerr << _ldbUnder << " < [balance] < " << _ldbOver << " "
<< " partitioning method \""
<< LoadBalancer :: DataBase :: methodToString (_ldbMethod)
<< "\"" << endl << endl;
}
} // got values on rank 0
// now communicate them
double buff[ 3 ] = { _ldbOver, _ldbUnder, double(_ldbMethod) };
// (much better then to read file on all procs)
const int root = 0 ;
mpa.bcast( &buff[ 0 ], 3, root);
// store values
_ldbOver = buff[ 0 ];
_ldbUnder = buff[ 1 ];
_ldbMethod = (LoadBalancer :: DataBase :: method ) buff[ 2 ];
// wait for all to finish
#ifndef NDEBUG
mpa.barrier();
#endif
}
#endif