iosys->my_comm = iosys->comp_comm;
iosys->io_comm = MPI_COMM_NULL;
iosys->intercomm = MPI_COMM_NULL;
iosys->error_handler = PIO_INTERNAL_ERROR;
iosys->async_interface = false;
iosys->compmaster = false;
iosys->default_rearranger = rearr;
iosys->num_iotasks = num_iotasks;
/* Find MPI rank and number of tasks in comp_comm communicator. */
CheckMPIReturn(MPI_Comm_rank(iosys->comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__);
CheckMPIReturn(MPI_Comm_size(iosys->comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__);
/* Rank 0 in comp_comm is the computation master; MPI_ROOT is stored so
 * it can be passed as the root of intercommunicator collectives. */
if(iosys->comp_rank == 0)
    iosys->compmaster = MPI_ROOT;
/* Ensure that settings for number of computation tasks, number
* of IO tasks, and the stride are reasonable. */
if((iosys->num_comptasks == 1) && (num_iotasks * ustride > 1)) {
    // This is a serial run with a bad configuration. Set up a single task.
    fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n");
    iosys->num_iotasks = 1;
    ustride = 1;
}
if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks))
    return PIO_EBADID;   /* bad IO-task / stride configuration */
if(iosys->ioranks[i] == iosys->comp_rank)
    iosys->ioproc = true;
/* The first entry in ioranks is the IO root. */
iosys->ioroot = iosys->ioranks[0];
/* Create an MPI info object (the handle defaults to MPI_INFO_NULL until
 * the create succeeds). */
iosys->info = MPI_INFO_NULL;
CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__);
if(iosys->comp_rank == iosys->ioranks[0])
    iosys->iomaster = MPI_ROOT;
/* Create a group for the computation tasks. */
CheckMPIReturn(MPI_Comm_group(iosys->comp_comm, &(iosys->compgroup)),__FILE__,__LINE__);
/* Create a group for the IO tasks. */
CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks,
&(iosys->iogroup)),__FILE__,__LINE__);
/* Create an MPI communicator for the IO tasks. */
CheckMPIReturn(MPI_Comm_create(iosys->comp_comm, iosys->iogroup, &(iosys->io_comm)),__FILE__,__LINE__);
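/* Worked example (illustrative values, not taken from this code): with
 * num_comptasks = 8, num_iotasks = 2, base rank 0 and ustride = 4, the
 * IO tasks are comp_comm ranks {0, 4}.  MPI_Comm_create() above then
 * returns a valid io_comm on those two ranks and MPI_COMM_NULL on all
 * other ranks, which is why io_comm is checked against MPI_COMM_NULL
 * before it is used or freed. */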
ios = pio_get_iosystem_from_id(iosysid);
if(ios == NULL)
    return PIO_EBADID;
CheckMPIReturn( MPI_Info_set(ios->info, hint, hintval), __FILE__,__LINE__);
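/* Usage sketch (illustrative; "romio_cb_write" is a standard ROMIO/MPI-IO
 * hint name, not something defined by this code):
 *
 *   PIOc_set_hint(iosysid, "romio_cb_write", "enable");
 *
 * The hint/value pair is recorded in the iosystem's MPI info object so it
 * can be forwarded to the underlying I/O layer when files are opened. */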
/**
 * @ingroup PIO_finalize
 * @brief Clean up internal data structures, free MPI resources, and exit
 * the pio library.
 *
 * @param iosysid: the io system ID provided by PIOc_Init_Intracomm().
 * @returns 0 for success or non-zero for error.
 */
int PIOc_finalize(const int iosysid)
{
iosystem_desc_t *ios, *nios;
int msg = PIO_MSG_EXIT;
int mpierr = MPI_SUCCESS;
ios = pio_get_iosystem_from_id(iosysid);
/* FIXME: The memory for ioranks is allocated in C only for intracomms.
 * Remove this check once mem allocs for ioranks completely move to the
 * C code. */
if(ios->intercomm == MPI_COMM_NULL){
    if(ios->ioranks != NULL){
        free(ios->ioranks);    /* allocated in init_intracomm */
    }
}
/* If async IO is in use, send the PIO_MSG_EXIT message from the
 * comp master to the IO processes. */
if (ios->async_interface && !ios->comp_rank)
{
    mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
    CheckMPIReturn(mpierr, __FILE__, __LINE__);
}
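/* The IO tasks are assumed to be blocked in a message loop on union_comm.
 * A minimal sketch of the matching receive (illustrative only, not the
 * library's actual handler code):
 *
 *   int msg;
 *   MPI_Recv(&msg, 1, MPI_INT, MPI_ANY_SOURCE, 1, union_comm,
 *            MPI_STATUS_IGNORE);
 *   if (msg == PIO_MSG_EXIT)
 *       return;   // leave the IO message loop
 */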
/* Free the buffer pool. */
free_cn_buffer_pool(*ios);
/* Free the MPI groups. */
if (ios->compgroup != MPI_GROUP_NULL)
    MPI_Group_free(&(ios->compgroup));
if (ios->iogroup != MPI_GROUP_NULL)
    MPI_Group_free(&(ios->iogroup));
/* Free the MPI communicators. my_comm is just a copy (but not an
 * MPI copy), so it does not need an MPI_Comm_free() call. */
if(ios->intercomm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->intercomm));
}
if(ios->io_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->io_comm));
}
if(ios->comp_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->comp_comm));
}
if(ios->union_comm != MPI_COMM_NULL){
    MPI_Comm_free(&(ios->union_comm));
}
/* Delete the iosystem_desc_t data associated with this id. */
return pio_delete_iosystem_from_list(iosysid);
}
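/* Illustrative call sequence for an application (not part of this code;
 * variable names and values are made up, and the argument order follows
 * the PIOc_Init_Intracomm prototype used with this library; check the
 * header for the exact signature):
 *
 *   int iosysid;
 *   PIOc_Init_Intracomm(MPI_COMM_WORLD, num_iotasks, stride, base,
 *                       PIO_REARR_SUBSET, &iosysid);
 *   ... define decompositions, read and write files ...
 *   PIOc_finalize(iosysid);
 */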
/** @brief Return a logical indicating whether this task is an iotask. */
int PIOc_iam_iotask(const int iosysid, bool *ioproc)
{
iosystem_desc_t *ios;
ios = pio_get_iosystem_from_id(iosysid);