Class for partitioning multiple levels of parallelism and managing message passing within these levels.
Public Member Functions

ParallelLibrary(int &argc, char **&argv)
    stand-alone mode constructor
ParallelLibrary()
    default library mode constructor (assumes MPI_COMM_WORLD)
ParallelLibrary(MPI_Comm dakota_mpi_comm)
    library mode constructor accepting a communicator
ParallelLibrary(const std::string &dummy)
    dummy constructor (used for dummy_lib)
~ParallelLibrary()
    destructor
const ParallelLevel & init_iterator_communicators(const int &iterator_servers, const int &procs_per_iterator, const int &max_iterator_concurrency, const std::string &default_config, const std::string &iterator_scheduling)
    split MPI_COMM_WORLD into iterator communicators
const ParallelLevel & init_evaluation_communicators(const int &evaluation_servers, const int &procs_per_evaluation, const int &max_evaluation_concurrency, const int &asynch_local_evaluation_concurrency, const std::string &default_config, const std::string &evaluation_scheduling)
    split an iterator communicator into evaluation communicators
const ParallelLevel & init_analysis_communicators(const int &analysis_servers, const int &procs_per_analysis, const int &max_analysis_concurrency, const int &asynch_local_analysis_concurrency, const std::string &default_config, const std::string &analysis_scheduling)
    split an evaluation communicator into analysis communicators
void free_iterator_communicators()
    deallocate iterator communicators
void free_evaluation_communicators()
    deallocate evaluation communicators
void free_analysis_communicators()
    deallocate analysis communicators
void print_configuration()
    print the parallel level settings for a particular parallel configuration
void specify_outputs_restart(CommandLineHandler &cmd_line_handler)
    specify output streams and restart file(s) using command line inputs (normal mode)
void specify_outputs_restart(const char *clh_std_output_filename=NULL, const char *clh_std_error_filename=NULL, const char *clh_read_restart_filename=NULL, const char *clh_write_restart_filename=NULL, int stop_restart_evals=0, bool pre_run_flag=false)
    specify output streams and restart file(s) using external inputs (library mode)
void manage_outputs_restart(const ParallelLevel &pl)
    manage output streams and restart file(s) (both modes)
void close_streams()
    close streams, files, and any other services
void abort_helper(int code) const
    finalize MPI with the correct communicator for abort
void output_helper(const std::string &s, std::ostream &outfile=Cout) const
    write output on rank 0 only
bool command_line_check() const
    return checkFlag
bool command_line_pre_run() const
    return preRunFlag
bool command_line_run() const
    return runFlag
bool command_line_post_run() const
    return postRunFlag
bool command_line_user_modes() const
    return userModesFlag
const std::string & command_line_pre_run_input() const
    return the preRunInput filename
const std::string & command_line_pre_run_output() const
    return the preRunOutput filename
const std::string & command_line_run_input() const
    return the runInput filename
const std::string & command_line_run_output() const
    return the runOutput filename
const std::string & command_line_post_run_input() const
    return the postRunInput filename
const std::string & command_line_post_run_output() const
    return the postRunOutput filename
void send_si(int &send_int, int dest, int tag)
    blocking send at the strategy-iterator communication level
void recv_si(int &recv_int, int source, int tag, MPI_Status &status)
    blocking receive at the strategy-iterator communication level
void send_si(MPIPackBuffer &send_buff, int dest, int tag)
    blocking send at the strategy-iterator communication level
void isend_si(MPIPackBuffer &send_buff, int dest, int tag, MPI_Request &send_req)
    nonblocking send at the strategy-iterator communication level
void recv_si(MPIUnpackBuffer &recv_buff, int source, int tag, MPI_Status &status)
    blocking receive at the strategy-iterator communication level
void irecv_si(MPIUnpackBuffer &recv_buff, int source, int tag, MPI_Request &recv_req)
    nonblocking receive at the strategy-iterator communication level
void send_ie(MPIPackBuffer &send_buff, int dest, int tag)
    blocking send at the iterator-evaluation communication level
void isend_ie(MPIPackBuffer &send_buff, int dest, int tag, MPI_Request &send_req)
    nonblocking send at the iterator-evaluation communication level
void recv_ie(MPIUnpackBuffer &recv_buff, int source, int tag, MPI_Status &status)
    blocking receive at the iterator-evaluation communication level
void irecv_ie(MPIUnpackBuffer &recv_buff, int source, int tag, MPI_Request &recv_req)
    nonblocking receive at the iterator-evaluation communication level
void send_ea(int &send_int, int dest, int tag)
    blocking send at the evaluation-analysis communication level
void isend_ea(int &send_int, int dest, int tag, MPI_Request &send_req)
    nonblocking send at the evaluation-analysis communication level
void recv_ea(int &recv_int, int source, int tag, MPI_Status &status)
    blocking receive at the evaluation-analysis communication level
void irecv_ea(int &recv_int, int source, int tag, MPI_Request &recv_req)
    nonblocking receive at the evaluation-analysis communication level
void bcast_w(int &data)
    broadcast an integer across MPI_COMM_WORLD
void bcast_i(int &data)
    broadcast an integer across an iterator communicator
void bcast_i(short &data)
    broadcast a short integer across an iterator communicator
void bcast_e(int &data)
    broadcast an integer across an evaluation communicator
void bcast_a(int &data)
    broadcast an integer across an analysis communicator
void bcast_si(int &data)
    broadcast an integer across a strategy-iterator intra communicator
void bcast_w(MPIPackBuffer &send_buff)
    broadcast a packed buffer across MPI_COMM_WORLD
void bcast_i(MPIPackBuffer &send_buff)
    broadcast a packed buffer across an iterator communicator
void bcast_e(MPIPackBuffer &send_buff)
    broadcast a packed buffer across an evaluation communicator
void bcast_a(MPIPackBuffer &send_buff)
    broadcast a packed buffer across an analysis communicator
void bcast_si(MPIPackBuffer &send_buff)
    broadcast a packed buffer across a strategy-iterator intra communicator
void bcast_w(MPIUnpackBuffer &recv_buff)
    matching receive for a packed buffer broadcast across MPI_COMM_WORLD
void bcast_i(MPIUnpackBuffer &recv_buff)
    matching receive for a packed buffer broadcast across an iterator communicator
void bcast_e(MPIUnpackBuffer &recv_buff)
    matching receive for a packed buffer broadcast across an evaluation communicator
void bcast_a(MPIUnpackBuffer &recv_buff)
    matching receive for a packed buffer broadcast across an analysis communicator
void bcast_si(MPIUnpackBuffer &recv_buff)
    matching receive for a packed buffer broadcast across a strategy-iterator intra communicator
void barrier_w()
    enforce MPI_Barrier on MPI_COMM_WORLD
void barrier_i()
    enforce MPI_Barrier on an iterator communicator
void barrier_e()
    enforce MPI_Barrier on an evaluation communicator
void barrier_a()
    enforce MPI_Barrier on an analysis communicator
void reduce_sum_ea(double *local_vals, double *sum_vals, const int &num_vals)
    compute a sum over an evaluation-analysis intra communicator using MPI_Reduce
void reduce_sum_a(double *local_vals, double *sum_vals, const int &num_vals)
    compute a sum over an analysis communicator using MPI_Reduce
void test(MPI_Request &request, int &test_flag, MPI_Status &status)
    test a nonblocking send/receive request for completion
void wait(MPI_Request &request, MPI_Status &status)
    wait for a nonblocking send/receive request to complete
void waitall(const int &num_recvs, MPI_Request *&recv_reqs)
    wait for all messages from a series of nonblocking receives
void waitsome(const int &num_sends, MPI_Request *&recv_requests, int &num_recvs, int *&index_array, MPI_Status *&status_array)
    wait for at least one message from a series of nonblocking receives, completing all that are available
void free(MPI_Request &request)
    free an MPI_Request
const int & world_size() const
    return worldSize
const int & world_rank() const
    return worldRank
bool mpirun_flag() const
    return mpirunFlag
bool is_null() const
    return dummyFlag
Real parallel_time() const
    return the current MPI wall clock time
void parallel_configuration_iterator(const ParConfigLIter &pc_iter)
    set the current ParallelConfiguration node
const ParConfigLIter & parallel_configuration_iterator() const
    return the current ParallelConfiguration node
const ParallelConfiguration & parallel_configuration() const
    return the current ParallelConfiguration instance
size_t num_parallel_configurations() const
    return the number of entries in parallelConfigurations
bool parallel_configuration_is_complete()
    identify whether the current ParallelConfiguration has been fully populated
void increment_parallel_configuration()
    add a new node to parallelConfigurations and increment currPCIter
bool w_parallel_level_defined() const
    test the current parallel configuration for definition of the world parallel level
bool si_parallel_level_defined() const
    test the current parallel configuration for definition of the strategy-iterator parallel level
bool ie_parallel_level_defined() const
    test the current parallel configuration for definition of the iterator-evaluation parallel level
bool ea_parallel_level_defined() const
    test the current parallel configuration for definition of the evaluation-analysis parallel level
std::vector<MPI_Comm> analysis_intra_communicators()
    return the set of analysis intra communicators for all parallel configurations (used for setting up direct simulation interfaces prior to execution time)
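A usage sketch tying several of the messaging helpers above together: an iterator master sends one job to each of two evaluation servers at the iterator-evaluation level, posts nonblocking receives, and completes whichever results arrive first via waitsome(). The tags, server ranks, packed contents, and buffer sizing are illustrative assumptions, not Dakota conventions:

    // hedged sketch; assumes the Dakota headers are available
    void schedule_two_servers(Dakota::ParallelLibrary& parallel_lib)
    {
      const int num_servers = 2;
      MPI_Request recv_reqs[num_servers];
      // assumes default-constructed buffers are acceptable for irecv_ie;
      // real code would size the receive buffers appropriately
      Dakota::MPIUnpackBuffer results[num_servers];
      for (int i = 0; i < num_servers; ++i) {
        Dakota::MPIPackBuffer job;
        job << i;                                // pack an illustrative job id
        parallel_lib.send_ie(job, i + 1, i + 1); // blocking send to server i+1
        parallel_lib.irecv_ie(results[i], i + 1, i + 1, recv_reqs[i]);
      }
      int num_done = 0;
      int done_index[num_servers];
      MPI_Status done_status[num_servers];
      MPI_Request* reqs  = recv_reqs;   // waitsome() takes references to pointers
      int*         index = done_index;
      MPI_Status*  stat  = done_status;
      parallel_lib.waitsome(num_servers, reqs, num_done, index, stat);
      // unpack results[done_index[k]] for k in [0, num_done) ...
    }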
Static Public Member Functions

static bool detect_parallel_launch(int &argc, char **&argv)
    detect parallel launch of DAKOTA using mpirun/mpiexec/poe/etc. based on command line arguments and environment variables
Private Member Functions

void init_mpi_comm(MPI_Comm dakota_mpi_comm)
    convenience function for initializing from a specific comm
void init_communicators(const ParallelLevel &parent_pl, const int &num_servers, const int &procs_per_server, const int &max_concurrency, const int &asynch_local_concurrency, const std::string &default_config, const std::string &scheduling_override)
    split a parent communicator into child server communicators
void free_communicators(ParallelLevel &pl)
    deallocate intra/inter communicators for a particular ParallelLevel
bool split_communicator_dedicated_master(const ParallelLevel &parent_pl, ParallelLevel &child_pl)
    split a parent communicator into a dedicated master processor and num_servers child communicators
bool split_communicator_peer_partition(const ParallelLevel &parent_pl, ParallelLevel &child_pl)
    split a parent communicator into num_servers peer child communicators (no dedicated master processor)
bool resolve_inputs(int &num_servers, int &procs_per_server, const int &avail_procs, int &proc_remainder, const int &max_concurrency, const int &capacity_multiplier, const std::string &default_config, const std::string &scheduling_override, bool print_rank)
    resolve user inputs into a sensible partitioning scheme
void send(MPIPackBuffer &send_buff, const int &dest, const int &tag, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    blocking buffer send at the current communication level
void send(int &send_int, const int &dest, const int &tag, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    blocking integer send at the current communication level
void isend(MPIPackBuffer &send_buff, const int &dest, const int &tag, MPI_Request &send_req, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    nonblocking buffer send at the current communication level
void isend(int &send_int, const int &dest, const int &tag, MPI_Request &send_req, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    nonblocking integer send at the current communication level
void recv(MPIUnpackBuffer &recv_buff, const int &source, const int &tag, MPI_Status &status, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    blocking buffer receive at the current communication level
void recv(int &recv_int, const int &source, const int &tag, MPI_Status &status, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    blocking integer receive at the current communication level
void irecv(MPIUnpackBuffer &recv_buff, const int &source, const int &tag, MPI_Request &recv_req, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    nonblocking buffer receive at the current communication level
void irecv(int &recv_int, const int &source, const int &tag, MPI_Request &recv_req, ParallelLevel &parent_pl, ParallelLevel &child_pl)
    nonblocking integer receive at the current communication level
void bcast(int &data, const MPI_Comm &comm)
    broadcast an integer across a communicator
void bcast(short &data, const MPI_Comm &comm)
    broadcast a short integer across a communicator
void bcast(MPIPackBuffer &send_buff, const MPI_Comm &comm)
    send a packed buffer across a communicator using a broadcast
void bcast(MPIUnpackBuffer &recv_buff, const MPI_Comm &comm)
    matching receive for a packed buffer broadcast
void barrier(const MPI_Comm &comm)
    enforce MPI_Barrier on comm
void reduce_sum(double *local_vals, double *sum_vals, const int &num_vals, const MPI_Comm &comm)
    compute a sum over comm using MPI_Reduce
void check_error(const std::string &err_source, const int &err_code)
    check the MPI return code and abort on error
void manage_run_modes(CommandLineHandler &cmd_line_handler)
    manage run mode information from the command-line handler
void split_filenames(const char *filenames, std::string &input_filename, std::string &output_filename)
    split a double-colon-separated pair of filenames (possibly empty) into input and output filename strings
Private Attributes

std::ofstream output_ofstream
    tagged file redirection of stdout
std::ofstream error_ofstream
    tagged file redirection of stderr
MPI_Comm dakotaMPIComm
    MPI_Comm on which DAKOTA is running
int worldRank
    rank in the MPI_Comm in which DAKOTA is running
int worldSize
    size of the MPI_Comm in which DAKOTA is running
bool mpirunFlag
    flag for a parallel mpirun/yod launch
bool ownMPIFlag
    flag for ownership of MPI_Init/MPI_Finalize
bool dummyFlag
    prevents multiple MPI_Finalize calls due to dummy_lib
bool stdOutputToFile
    flags redirection of DAKOTA std output to a file
bool stdErrorToFile
    flags redirection of DAKOTA std error to a file
bool checkFlag
    flags invocation with command line option -check
bool preRunFlag
    flags invocation with command line option -pre_run
bool runFlag
    flags invocation with command line option -run
bool postRunFlag
    flags invocation with command line option -post_run
bool userModesFlag
    whether user run modes are active
std::string preRunInput
    filename for pre_run input
std::string preRunOutput
    filename for pre_run output
std::string runInput
    filename for run input
std::string runOutput
    filename for run output
std::string postRunInput
    filename for post_run input
std::string postRunOutput
    filename for post_run output
Real startCPUTime
    start reference for UTILIB CPU timer
Real startWCTime
    start reference for UTILIB wall clock timer
Real startMPITime
    start reference for MPI wall clock timer
long startClock
    start reference for local clock() timer measuring parent+child CPU
std::string stdOutputFilename
    filename for redirection of stdout
std::string stdErrorFilename
    filename for redirection of stderr
std::string readRestartFilename
    input filename for restart
std::string writeRestartFilename
    output filename for restart
int stopRestartEvals
    number of evaluations at which to stop restart processing
std::list<ParallelLevel> parallelLevels
    the complete set of parallelism levels for managing multilevel parallelism among one or more configurations
std::list<ParallelConfiguration> parallelConfigurations
    the set of parallel configurations which manage list iterators for indexing into parallelLevels
ParLevLIter currPLIter
    list iterator identifying the current node in parallelLevels
ParConfigLIter currPCIter
    list iterator identifying the current node in parallelConfigurations
Detailed Description

Class for partitioning multiple levels of parallelism and managing message passing within these levels.
The ParallelLibrary class encapsulates all of the details of performing message passing within multiple levels of parallelism. It provides functions for partitioning of levels according to user configuration input and functions for passing messages within and across MPI communicators for each of the parallelism levels. If support for other message-passing libraries beyond MPI becomes needed (PVM, ...), then ParallelLibrary would be promoted to a base class with virtual functions to encapsulate the library-specific syntax.
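A hedged sketch of the partitioning flow across the three levels. The numeric arguments and the default_config/scheduling strings below are illustrative placeholders, not Dakota defaults; in practice these values come from the user's strategy and interface specifications:

    #include <mpi.h>

    void partition_sketch()
    {
      // library mode: assumes MPI_Init has already been called elsewhere
      Dakota::ParallelLibrary parallel_lib;

      // level 1: split MPI_COMM_WORLD into iterator servers
      const Dakota::ParallelLevel& si_pl =
        parallel_lib.init_iterator_communicators(
          2,                   // iterator_servers
          0,                   // procs_per_iterator (0 = auto-configure)
          4,                   // max_iterator_concurrency
          "push_down",         // default_config (assumed string)
          "self_scheduling");  // iterator_scheduling (assumed string)

      // level 2: split each iterator communicator into evaluation servers
      const Dakota::ParallelLevel& ie_pl =
        parallel_lib.init_evaluation_communicators(
          0, 0, 8, 1, "push_down", "self_scheduling");

      // level 3: split each evaluation communicator into analysis servers
      const Dakota::ParallelLevel& ea_pl =
        parallel_lib.init_analysis_communicators(
          0, 1, 2, 1, "push_down", "self_scheduling");

      // ... message passing at each level (send_ie, bcast_a, ...) ...

      parallel_lib.free_analysis_communicators();
      parallel_lib.free_evaluation_communicators();
      parallel_lib.free_iterator_communicators();
    }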
ParallelLibrary(int &argc, char **&argv)
stand-alone mode constructor
This constructor is the one used by main.C. It calls MPI_Init conditionally based on whether a parallel launch is detected.
References Dakota::abort_handler(), ParallelLibrary::currPLIter, Dakota::Dak_pl, ParallelLibrary::detect_parallel_launch(), ParallelLibrary::increment_parallel_configuration(), ParallelLibrary::mpirunFlag, ParallelLibrary::ownMPIFlag, ParallelLibrary::parallelLevels, ParallelLevel::serverCommRank, ParallelLevel::serverCommSize, ParallelLevel::serverIntraComm, Dakota::start_dakota_heartbeat(), ParallelLibrary::startClock, ParallelLibrary::startCPUTime, ParallelLibrary::startMPITime, ParallelLibrary::startWCTime, ParallelLibrary::worldRank, and ParallelLibrary::worldSize.
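A simplified sketch of the conditional MPI_Init logic this constructor performs (illustration only; the real constructor also records timer start references, starts the DAKOTA heartbeat, and builds the world ParallelLevel):

    #include <mpi.h>

    void standalone_init_sketch(int& argc, char**& argv)
    {
      bool mpirun_flag =
        Dakota::ParallelLibrary::detect_parallel_launch(argc, argv);
      if (mpirun_flag) {
        MPI_Init(&argc, &argv);  // ownMPIFlag: this instance must finalize MPI
        int world_rank, world_size;
        MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &world_size);
      }
      // serial launch: no MPI calls; worldRank = 0, worldSize = 1
    }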
ParallelLibrary()
default library mode constructor (assumes MPI_COMM_WORLD)
This constructor provides a library mode default ParallelLibrary. It does not call MPI_Init, but rather gathers data from MPI_COMM_WORLD if MPI_Init has been called elsewhere.
References ParallelLibrary::dakotaMPIComm, and ParallelLibrary::init_mpi_comm().
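The corresponding library-mode usage pattern, sketched (the application, not ParallelLibrary, owns MPI initialization and finalization in this mode):

    #include <mpi.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);                 // application initializes MPI
      {
        Dakota::ParallelLibrary parallel_lib; // queries MPI_COMM_WORLD only
        // ... configure and run DAKOTA ...
      }                                       // destructor: no MPI_Finalize here
      MPI_Finalize();                         // application finalizes MPI
      return 0;
    }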
ParallelLibrary(MPI_Comm dakota_mpi_comm)
library mode constructor accepting communicator
This constructor provides a library mode ParallelLibrary, accepting an MPI communicator that might not be MPI_COMM_WORLD. It does not call MPI_Init, but rather gathers data from dakota_mpi_comm if MPI_Init has been called elsewhere.
References ParallelLibrary::dakotaMPIComm, and ParallelLibrary::init_mpi_comm().
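For example, a host application might dedicate a subset of its processors to DAKOTA (a sketch; the 16-rank split criterion is an arbitrary illustration):

    #include <mpi.h>

    void embed_dakota_sketch()
    {
      int world_rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

      // first 16 ranks run DAKOTA; the rest do other application work
      int color = (world_rank < 16) ? 0 : MPI_UNDEFINED;
      MPI_Comm dakota_comm;
      MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &dakota_comm);

      if (dakota_comm != MPI_COMM_NULL) {
        Dakota::ParallelLibrary parallel_lib(dakota_comm);
        // ... run DAKOTA on the subcommunicator ...
      }
    }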
ParallelLibrary(const std::string &dummy)
dummy constructor (used for dummy_lib)
This constructor is used for creation of the global dummy_lib object, which is used to satisfy initialization requirements when the real ParallelLibrary object is not available.
void specify_outputs_restart(CommandLineHandler &cmd_line_handler)
specify output streams and restart file(s) using command line inputs (normal mode)
On the rank 0 processor, retrieve the -output, -error, -read_restart, and -write_restart filenames and the -stop_restart limit from the command line. Absent a user specification, the command line handler defaults the filenames to NULL (except the write restart filename, which defaults to dakota.rst) and the stop restart limit (read_restart_evals) to 0. This information is broadcast from rank 0 to all iterator masters in manage_outputs_restart().
References ParallelLibrary::manage_run_modes(), CommandLineHandler::read_restart_evals(), ParallelLibrary::readRestartFilename, GetLongOpt::retrieve(), ParallelLibrary::stdErrorFilename, ParallelLibrary::stdOutputFilename, ParallelLibrary::stopRestartEvals, ParallelLibrary::worldRank, and ParallelLibrary::writeRestartFilename.
Referenced by main(), and run_dakota().
void specify_outputs_restart(const char *clh_std_output_filename = NULL,
                             const char *clh_std_error_filename = NULL,
                             const char *clh_read_restart_filename = NULL,
                             const char *clh_write_restart_filename = NULL,
                             int stop_restart_evals = 0,
                             bool pre_run_flag = false)
specify output streams and restart file(s) using external inputs (library mode)
Rather than extracting them from the command line, pass the std output, std error, read restart, and write restart filenames and the stop restart limit directly. This function only needs to be invoked to specify non-default values: the filenames default to NULL (resulting in no output redirection, no restart read, and the default restart write file), and the stop restart limit defaults to 0 (no limit on the number of restart evaluations read).
References ParallelLibrary::postRunFlag, ParallelLibrary::preRunFlag, ParallelLibrary::readRestartFilename, ParallelLibrary::runFlag, ParallelLibrary::stdErrorFilename, ParallelLibrary::stdOutputFilename, ParallelLibrary::stopRestartEvals, ParallelLibrary::userModesFlag, and ParallelLibrary::writeRestartFilename.
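An illustrative library-mode call overriding only the non-default settings (the filenames and evaluation limit are placeholders; parallel_lib is assumed in scope):

    parallel_lib.specify_outputs_restart(
      "dakota.out",  // clh_std_output_filename: redirect std output
      NULL,          // clh_std_error_filename:  leave std error alone
      "prev.rst",    // clh_read_restart_filename: process old evaluations
      "dakota.rst",  // clh_write_restart_filename (the default name anyway)
      500);          // stop_restart_evals: stop restart processing at 500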
void manage_outputs_restart(const ParallelLevel &pl)
manage output streams and restart file(s) (both modes)
If the user has specified the use of files for DAKOTA standard output and/or standard error, bind these filenames to the Cout/Cerr macros. In addition, if concurrent iterators are to be used, create and tag multiple output streams in order to prevent jumbled output. Manage restart file(s) by processing any incoming evaluations from an old restart file and by setting up the binary output stream for new evaluations. Only master iterator processor(s) read and write restart information. This function must follow init_iterator_communicators so that restart can be managed properly for concurrent iterator strategies. In the case of concurrent iterators, each iterator has its own restart file tagged with its iterator number.
References Dakota::abort_handler(), ParallelLibrary::bcast(), ParallelLibrary::checkFlag, Dakota::dakota_cerr, Dakota::dakota_cout, Dakota::data_pairs, ParallelLevel::dedicatedMasterFlag, ParallelLibrary::error_ofstream, ParallelLevel::hubServerCommSize, ParallelLevel::hubServerIntraComm, ParallelLevel::numServers, ParallelLibrary::output_ofstream, ParallelLibrary::postRunFlag, ParallelLibrary::postRunInput, ParallelLibrary::postRunOutput, ParallelLibrary::preRunFlag, ParallelLibrary::preRunInput, ParallelLibrary::preRunOutput, ParallelLibrary::readRestartFilename, ParallelLibrary::runFlag, ParallelLibrary::runInput, ParallelLibrary::runOutput, ParallelLevel::serverCommRank, ParallelLevel::serverId, ParallelLevel::serverMasterFlag, MPIPackBuffer::size(), ParallelLibrary::stdErrorFilename, ParallelLibrary::stdErrorToFile, ParallelLibrary::stdOutputFilename, ParallelLibrary::stdOutputToFile, ParallelLibrary::stopRestartEvals, ParallelLibrary::userModesFlag, ParallelLibrary::worldRank, Dakota::write_restart, and ParallelLibrary::writeRestartFilename.
Referenced by Strategy::init_iterator_parallelism().
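The required ordering, sketched with placeholder specification values (the configuration strings are assumptions, not confirmed DAKOTA keywords; parallel_lib is assumed in scope):

    // partition first, so restart can be managed per concurrent iterator
    int num_servers = 2, procs_per_iterator = 0, max_concurrency = 2;
    const Dakota::ParallelLevel& si_pl =
      parallel_lib.init_iterator_communicators(num_servers, procs_per_iterator,
                                               max_concurrency, "push_down",
                                               "self_scheduling");
    parallel_lib.manage_outputs_restart(si_pl);  // then bind tagged streams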
void close_streams()
close streams, files, and any other services
Close the streams associated with manage_outputs_restart() and terminate any additional services that may be active.
References Dakota::abort_handler(), ParallelLibrary::currPCIter, Dakota::dakota_cerr, Dakota::dakota_cout, Dakota::dc_ptr_int, ParallelLibrary::error_ofstream, Dakota::mc_ptr_int, ParallelLibrary::output_ofstream, ParallelLevel::serverMasterFlag, ParallelLibrary::stdErrorToFile, ParallelLibrary::stdOutputToFile, and Dakota::write_restart.
Referenced by ParallelLibrary::~ParallelLibrary().
void increment_parallel_configuration() [inline]
add a new node to parallelConfigurations and increment currPCIter
Called from the ParallelLibrary ctor and from Model::init_communicators(). An increment is performed for each Model initialization except the first (which inherits the world and strategy-iterator parallel levels from the first partial configuration).
References ParallelLibrary::currPCIter, ParallelConfiguration::eaPLIter, ParallelConfiguration::iePLIter, ParallelConfiguration::numParallelLevels, ParallelLibrary::parallelConfigurations, ParallelLibrary::parallelLevels, ParallelConfiguration::siPLIter, ParallelLibrary::worldSize, and ParallelConfiguration::wPLIter.
Referenced by Model::init_communicators(), ParallelLibrary::init_mpi_comm(), and ParallelLibrary::ParallelLibrary().
void init_mpi_comm(MPI_Comm dakota_mpi_comm) [private]
convenience function for initializing from specific comm
Shared function for initializing based on the passed MPI_Comm.
References ParallelLibrary::currPLIter, Dakota::Dak_pl, ParallelLibrary::increment_parallel_configuration(), ParallelLibrary::mpirunFlag, ParallelLibrary::parallelLevels, ParallelLevel::serverCommRank, ParallelLevel::serverCommSize, ParallelLevel::serverIntraComm, Dakota::start_dakota_heartbeat(), ParallelLibrary::startClock, ParallelLibrary::startCPUTime, ParallelLibrary::startMPITime, ParallelLibrary::startWCTime, ParallelLibrary::worldRank, and ParallelLibrary::worldSize.
Referenced by ParallelLibrary::ParallelLibrary().
void init_communicators(const ParallelLevel &parent_pl,
                        const int &num_servers,
                        const int &procs_per_server,
                        const int &max_concurrency,
                        const int &asynch_local_concurrency,
                        const std::string &default_config,
                        const std::string &scheduling_override) [private]
split a parent communicator into child server communicators
Split parent communicator into concurrent child server partitions as specified by the passed parameters. This constructs new child intra-communicators and parent-child inter-communicators. This function is called from the Strategy constructor for the concurrent iterator level and from ApplicationInterface::init_communicators() for the concurrent evaluation and concurrent analysis levels.
References ParallelLevel::commSplitFlag, ParallelLibrary::currPCIter, ParallelLibrary::currPLIter, ParallelLevel::dedicatedMasterFlag, ParallelLevel::numServers, ParallelLibrary::parallelLevels, ParallelLevel::procRemainder, ParallelLevel::procsPerServer, ParallelLibrary::resolve_inputs(), ParallelLevel::serverCommRank, ParallelLevel::serverCommSize, ParallelLibrary::split_communicator_dedicated_master(), and ParallelLibrary::split_communicator_peer_partition().
Referenced by ParallelLibrary::init_analysis_communicators(), ParallelLibrary::init_evaluation_communicators(), and ParallelLibrary::init_iterator_communicators().
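A hedged sketch of the dedicated-master variant of this split in raw MPI terms (the color assignment and remainder handling are simplified relative to the real implementation, which also constructs parent-child inter-communicators for master-server messaging):

    #include <mpi.h>

    // illustrative split: assumes parent_size > num_servers; remainder
    // ranks are folded into the last server here, whereas the real code
    // spreads them one per server
    void dedicated_master_split_sketch(MPI_Comm parent_comm, int num_servers)
    {
      int parent_rank, parent_size;
      MPI_Comm_rank(parent_comm, &parent_rank);
      MPI_Comm_size(parent_comm, &parent_size);

      int procs_per_server = (parent_size - 1) / num_servers;
      int color = (parent_rank == 0)
                ? 0                                          // dedicated master
                : 1 + (parent_rank - 1) / procs_per_server;  // server id 1..n
      if (color > num_servers)
        color = num_servers;                                 // fold remainder
      MPI_Comm child_intra_comm;
      MPI_Comm_split(parent_comm, color, parent_rank, &child_intra_comm);
    }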
bool resolve_inputs(int &num_servers,
                    int &procs_per_server,
                    const int &avail_procs,
                    int &proc_remainder,
                    const int &max_concurrency,
                    const int &capacity_multiplier,
                    const std::string &default_config,
                    const std::string &scheduling_override,
                    bool print_rank) [private]
resolve user inputs into a sensible partitioning scheme
This function is responsible for the "auto-configure" intelligence of DAKOTA. It resolves a variety of inputs and overrides into a sensible partitioning configuration for a particular parallelism level. It also handles the general case in which a user's specification does not divide evenly into the number of available processors for the level. If num_servers and procs_per_server are both nondefault, the former takes precedence.
Referenced by ParallelLibrary::init_communicators().
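A simplified sketch of this resolution arithmetic (a hypothetical helper approximating the described behavior; the real function also weighs scheduling overrides, the dedicated-master trade-off, and remainder distribution via proc_remainder):

    #include <algorithm>

    void resolve_inputs_sketch(int& num_servers, int& procs_per_server,
                               int avail_procs, int max_concurrency,
                               int capacity_multiplier)
    {
      if (num_servers > 0) {
        // num_servers takes precedence over a conflicting procs_per_server
        procs_per_server = avail_procs / num_servers;
      }
      else if (procs_per_server > 0) {
        num_servers = avail_procs / procs_per_server;
      }
      else {
        // neither specified: size the partition to the schedulable
        // concurrency (each server absorbs capacity_multiplier jobs)
        num_servers = std::min(avail_procs,
                               std::max(1, max_concurrency * capacity_multiplier));
        procs_per_server = avail_procs / num_servers;
      }
      // the real code spreads any leftover processors across servers
      // one at a time
    }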
void split_filenames(const char *filenames,
                     std::string &input_filename,
                     std::string &output_filename) [private]
split a double-colon-separated pair of filenames (possibly empty) into input and output filename strings
Tokenize the double-colon-delimited input and output filenames, returning the strings unchanged if the token is not found.
Referenced by ParallelLibrary::manage_run_modes().
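A sketch of the tokenization under the stated behavior (the helper shape is assumed; an input such as "pre.in::pre.out" would split into the two names, while a string without the "::" token leaves both outputs unchanged):

    #include <string>

    void split_filenames_sketch(const char* filenames,
                                std::string& input_filename,
                                std::string& output_filename)
    {
      if (!filenames)
        return;                               // possibly-empty specification
      const std::string pair(filenames);
      const std::string::size_type pos = pair.find("::");
      if (pos == std::string::npos)
        return;                               // token not found: leave unchanged
      input_filename  = pair.substr(0, pos);  // e.g. "pre.in"
      output_filename = pair.substr(pos + 2); // e.g. "pre.out"
    }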