Diff of the PIO library's intracomm initialization and finalization code (PIOc_Init_Intracomm, PIOc_set_hint, PIOc_finalize):
@@ -379,33 +379,33 @@
         ierr = PIO_EIO;
     }

     if (!ierr)
     {
         iosys->my_comm = iosys->comp_comm;
         iosys->io_comm = MPI_COMM_NULL;
         iosys->intercomm = MPI_COMM_NULL;
         iosys->error_handler = PIO_INTERNAL_ERROR;
         iosys->async_interface= false;
-        iosys->compmaster = false;
-        iosys->iomaster = false;
+        iosys->compmaster = 0;
+        iosys->iomaster = 0;
         iosys->ioproc = false;
         iosys->default_rearranger = rearr;
         iosys->num_iotasks = num_iotasks;

         ustride = stride;

         /* Find MPI rank and number of tasks in comp_comm communicator. */
         CheckMPIReturn(MPI_Comm_rank(iosys->comp_comm, &(iosys->comp_rank)),__FILE__,__LINE__);
         CheckMPIReturn(MPI_Comm_size(iosys->comp_comm, &(iosys->num_comptasks)),__FILE__,__LINE__);
         if(iosys->comp_rank==0)
-            iosys->compmaster = true;
+            iosys->compmaster = MPI_ROOT;

         /* Ensure that settings for number of computation tasks, number
          * of IO tasks, and the stride are reasonable. */
         if((iosys->num_comptasks == 1) && (num_iotasks*ustride > 1)) {
             // This is a serial run with a bad configuration. Set up a single task.
             fprintf(stderr, "PIO_TP PIOc_Init_Intracomm reset stride and tasks.\n");
             iosys->num_iotasks = 1;
             ustride = 1;
         }
         if((iosys->num_iotasks < 1) || ((iosys->num_iotasks*ustride) > iosys->num_comptasks)){
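The change from true/false to 0/MPI_ROOT in compmaster and iomaster is likely motivated by MPI's intercommunicator collective semantics: in an intercomm broadcast, the root of the sending group passes MPI_ROOT as the root argument, the other members of the sending group pass MPI_PROC_NULL, and every task in the receiving group passes the root's rank in the remote group. Storing MPI_ROOT directly in the master fields lets them be handed to MPI_Bcast unchanged on the master task when the async intercomm is in use. A minimal sketch of those semantics (the function and parameter names are illustrative, not PIO's):

#include <mpi.h>

/* Broadcast one int across an intercommunicator. root_flag must be
 * MPI_ROOT on the sending group's root, MPI_PROC_NULL on the other
 * senders, and the sending root's rank (in the remote group) on every
 * receiver; the master fields set above are built so they can serve
 * as this argument on the root task. */
static void intercomm_bcast(int *val, int root_flag, MPI_Comm intercomm)
{
    MPI_Bcast(val, 1, MPI_INT, root_flag, intercomm);
}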
@@ -421,21 +421,21 @@
             if(iosys->ioranks[i] == iosys->comp_rank)
                 iosys->ioproc = true;
         }
         iosys->ioroot = iosys->ioranks[0];

         /* Create an MPI info object. */
         CheckMPIReturn(MPI_Info_create(&(iosys->info)),__FILE__,__LINE__);
         iosys->info = MPI_INFO_NULL;

         if(iosys->comp_rank == iosys->ioranks[0])
-            iosys->iomaster = true;
+            iosys->iomaster = MPI_ROOT;

         /* Create a group for the computation tasks. */
         CheckMPIReturn(MPI_Comm_group(iosys->comp_comm, &(iosys->compgroup)),__FILE__,__LINE__);

         /* Create a group for the IO tasks. */
         CheckMPIReturn(MPI_Group_incl(iosys->compgroup, iosys->num_iotasks, iosys->ioranks,
                                       &(iosys->iogroup)),__FILE__,__LINE__);

         /* Create an MPI communicator for the IO tasks. */
         CheckMPIReturn(MPI_Comm_create(iosys->comp_comm, iosys->iogroup, &(iosys->io_comm)),__FILE__,__LINE__);
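MPI_Comm_group, MPI_Group_incl, and MPI_Comm_create, used in sequence above, are the standard recipe for carving a sub-communicator out of an existing one. A self-contained sketch of the same pattern, choosing every stride-th rank of MPI_COMM_WORLD as an IO task (the stride rule here is illustrative and does not reproduce PIO's elided iorank computation):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, stride = 2;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Pick every stride-th world rank as an IO task. */
    int num_io = (size + stride - 1) / stride;
    int *ioranks = malloc(num_io * sizeof(int));
    for (int i = 0; i < num_io; i++)
        ioranks[i] = i * stride;

    /* Same three calls as the diff: extract the base group, include
     * the chosen ranks, then create the communicator. */
    MPI_Group world_group, io_group;
    MPI_Comm io_comm;
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_incl(world_group, num_io, ioranks, &io_group);
    MPI_Comm_create(MPI_COMM_WORLD, io_group, &io_comm);

    /* Ranks outside the group get MPI_COMM_NULL back, which is why
     * the library guards its frees against MPI_COMM_NULL later. */
    if (io_comm != MPI_COMM_NULL) {
        int io_rank;
        MPI_Comm_rank(io_comm, &io_rank);
        printf("world rank %d is IO rank %d\n", rank, io_rank);
        MPI_Comm_free(&io_comm);
    }

    MPI_Group_free(&io_group);
    MPI_Group_free(&world_group);
    free(ioranks);
    MPI_Finalize();
    return 0;
}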
@@ -482,9 +482,9 @@
     ios = pio_get_iosystem_from_id(iosysid);
     if(ios == NULL)
         return PIO_EBADID;
     if(ios->ioproc)
         CheckMPIReturn( MPI_Info_set(ios->info, hint, hintval), __FILE__,__LINE__);

     return PIO_NOERR;

 }
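This fragment is the tail of PIOc_set_hint, which forwards a key/value hint to the iosystem's info object so that later file opens can see it. Note, though, that in the init code above MPI_Info_create is immediately followed by an assignment of MPI_INFO_NULL to iosys->info; calling MPI_Info_set on a null info handle is erroneous, so those two lines look like leftovers from a refactor. For reference, the usual MPI_Info life cycle (the hint key and filename below are illustrative only):

#include <mpi.h>

/* Sketch of the standard MPI_Info hint life cycle: create the object,
 * attach one or more key/value hints, pass it to a file open, then
 * free it once the open has copied the hints. */
static void open_with_hints(MPI_Comm comm, MPI_File *fh)
{
    MPI_Info info;

    MPI_Info_create(&info);
    MPI_Info_set(info, "cb_nodes", "4");   /* collective buffering hint */
    MPI_File_open(comm, "example.nc", MPI_MODE_CREATE | MPI_MODE_WRONLY,
                  info, fh);
    MPI_Info_free(&info);
}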
@@ -491,25 +491,28 @@

-/** @ingroup PIO_finalize
- * @brief Clean up data structures and exit the pio library.
+/** @ingroup PIO_finalize
+ * Clean up internal data structures, free MPI resources, and exit the
+ * pio library.
  *
  * @param iosysid: the io system ID provided by PIOc_Init_Intracomm().
  *
  * @returns 0 for success or non-zero for error.
  */

 int PIOc_finalize(const int iosysid)
 {
     iosystem_desc_t *ios, *nios;
+    int msg;
+    int mpierr;

     ios = pio_get_iosystem_from_id(iosysid);
     if(ios == NULL)
-        return PIO_EBADID;
-    /* FIXME: The memory for ioranks is allocated in C only for intracomms
-     * Remove this check once mem allocs for ioranks completely moves to the
-     * C code
-     */
-    if(ios->intercomm == MPI_COMM_NULL){
-        if(ios->ioranks != NULL){
-            free(ios->ioranks);
-        }
+        return PIO_EBADID;
+
+    /* If asynch IO is in use, send the PIO_MSG_EXIT message from the
+     * comp master to the IO processes. */
+    if (ios->async_interface && !ios->comp_rank)
+    {
+        msg = PIO_MSG_EXIT;
+        mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm);
+        CheckMPIReturn(mpierr, __FILE__, __LINE__);
     }
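The MPI_Send added above is one half of a handshake: in async mode the IO tasks block in a message loop and need an explicit PIO_MSG_EXIT to return. The receiving side lives elsewhere in the library; the sketch below only illustrates how such a pairing works, with the tag (1) and communicator matching the send, and with the loop body, comproot, and exit_msg parameters assumed for the example:

#include <mpi.h>

/* Illustrative receive loop for an IO task: block on messages from the
 * computation root and dispatch until the exit sentinel arrives. In PIO
 * the sentinel is PIO_MSG_EXIT and the real loop lives elsewhere in the
 * library; this is not PIO's actual message handler. */
static void io_msg_loop(MPI_Comm union_comm, int comproot, int exit_msg)
{
    int msg;
    MPI_Status status;

    do {
        MPI_Recv(&msg, 1, MPI_INT, comproot, 1, union_comm, &status);
        /* ... dispatch non-exit messages to their handlers here ... */
    } while (msg != exit_msg);
}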
@@ -516,25 +519,33 @@

+    /* Free this memory that was allocated in init_intracomm. */
+    if (ios->ioranks)
+        free(ios->ioranks);
+
+    /* Free the buffer pool. */
     free_cn_buffer_pool(*ios);

     /* Free the MPI groups. */
-    MPI_Group_free(&(ios->compgroup));
-    MPI_Group_free(&(ios->iogroup));
+    if (ios->compgroup != MPI_GROUP_NULL)
+        MPI_Group_free(&ios->compgroup);
+
+    if (ios->iogroup != MPI_GROUP_NULL)
+        MPI_Group_free(&(ios->iogroup));

-    /* Free the MPI communicators. */
+    /* Free the MPI communicators. my_comm is just a copy (but not an
+     * MPI copy), so does not have to have an MPI_Comm_free() call. */
     if(ios->intercomm != MPI_COMM_NULL){
         MPI_Comm_free(&(ios->intercomm));
     }
     if(ios->io_comm != MPI_COMM_NULL){
         MPI_Comm_free(&(ios->io_comm));
     }
     if(ios->comp_comm != MPI_COMM_NULL){
         MPI_Comm_free(&(ios->comp_comm));
     }
     if(ios->union_comm != MPI_COMM_NULL){
         MPI_Comm_free(&(ios->union_comm));
     }

+    /* Delete the iosystem_desc_t data associated with this id. */
     return pio_delete_iosystem_from_list(iosysid);
-
-
 }
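Taken together, PIOc_Init_Intracomm and PIOc_finalize now bracket the life of an IO system symmetrically: the ioranks array, the MPI groups, the communicators, and the iosystem list entry acquired during init are all released here. A sketch of the typical call sequence (the init signature and the PIO_REARR_BOX constant are assumed from PIO's public header, and the argument values are placeholders):

#include <mpi.h>
#include <pio.h>

int main(int argc, char **argv)
{
    int iosysid;

    MPI_Init(&argc, &argv);

    /* One IO task for every two compute tasks, starting at rank 0. */
    PIOc_Init_Intracomm(MPI_COMM_WORLD, /* num_iotasks */ 2,
                        /* stride */ 2, /* base */ 0,
                        PIO_REARR_BOX, &iosysid);

    /* ... define decompositions, create/read/write files ... */

    PIOc_finalize(iosysid);
    MPI_Finalize();
    return 0;
}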
@@ -541,9 +552,9 @@


 /**
 ** @brief return a logical indicating whether this task is an iotask
 */
 int PIOc_iam_iotask(const int iosysid, bool *ioproc)
 {
     iosystem_desc_t *ios;
     ios = pio_get_iosystem_from_id(iosysid);
     if(ios == NULL)