diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index d26cbd7a0..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 2 -jobs: - build: - machine: true - steps: - - checkout - - run: - name: Submodule Init - command: git submodule update --init --recursive - - run: - name: Docker Build - command: cd src/examples/docker/ubuntu/ && ./example_build.sh - no_output_timeout: 30m diff --git a/.github/workflows/build_conduit_gcc.yml b/.github/workflows/build_conduit_gcc.yml index 138ed4a1c..67fe2c640 100644 --- a/.github/workflows/build_conduit_gcc.yml +++ b/.github/workflows/build_conduit_gcc.yml @@ -153,6 +153,11 @@ jobs: run: | echo "**** Building Conduit" cmake --build build -j2 + - name: Run Conduit Unit Tests + run: | + echo "**** Conduit Unit Tests" + export CTEST_OUTPUT_ON_FAILURE=1 + ctest --test-dir build - name: Install Conduit run: | echo "**** Installing Conduit" diff --git a/CHANGELOG.md b/CHANGELOG.md index e87159eba..157c1cb0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,12 @@ and this project aspires to adhere to [Semantic Versioning](https://semver.org/s - Sped up `conduit::blueprint::mesh::topology::unstructured::to_polygonal()` algorithm and added support for mixed element types. - Improved support for "mixed" element types in `conduit::blueprint::mesh::utils::ShapeType` and also removed a string member to speed up construction. +### Changed + +#### Relay +- Updates to use Silo 4.12 and HDF5 2.0.0. +- Reworked HDF5 handle management to avoid resource leaks with exceptions. + ## [0.9.5] - Released 2025-09-10 ### Added diff --git a/scripts/build_conduit/2025_12_08_h5zzfp-hdf5-cmake-fix.patch b/scripts/build_conduit/2025_12_08_h5zzfp-hdf5-cmake-fix.patch new file mode 100644 index 000000000..71bb779fc --- /dev/null +++ b/scripts/build_conduit/2025_12_08_h5zzfp-hdf5-cmake-fix.patch @@ -0,0 +1,32 @@ +From e3caf7354f1cfb4bd56073cd9d145bb4fff32f1b Mon Sep 17 00:00:00 2001 +From: Cyrus Harrison +Date: Mon, 8 Dec 2025 15:26:12 -0800 +Subject: [PATCH] hdf5 cmake logic fix + +--- + cmake/HDFMacros.cmake | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/cmake/HDFMacros.cmake b/cmake/HDFMacros.cmake +index 5aaacd3..bfe1932 100644 +--- a/cmake/HDFMacros.cmake ++++ b/cmake/HDFMacros.cmake +@@ -97,7 +97,7 @@ macro (HDF5_SUPPORT) + else () + add_definitions (-DH5_BUILT_AS_STATIC_LIB) + endif () +- if (FORTRAN_INTERFACE AND ${HDF5_BUILD_FORTRAN}) ++ if (FORTRAN_INTERFACE) + if (HDF5_shared_Fortran_FOUND) + set (HDF5_FORTRAN_INCLUDE_DIRS ${HDF5_INCLUDE_DIR_FORTRAN}) + set (HDF5_FORTRAN_LIBRARIES ${HDF5_FORTRAN_SHARED_LIBRARY}) +@@ -125,7 +125,7 @@ macro (HDF5_SUPPORT) + else () + set (HDF5_FOUND 0) + endif () +- if (FORTRAN_INTERFACE AND ${HDF5_BUILD_FORTRAN}) ++ if (FORTRAN_INTERFACE) + if (HDF5_shared_Fortran_FOUND) + set (HDF5_FORTRAN_INCLUDE_DIRS ${HDF5_INCLUDE_DIR_FORTRAN}) + set (HDF5_FORTRAN_LIBRARIES ${HDF5_FORTRAN_SHARED_LIBRARY}) +-- diff --git a/scripts/build_conduit/build_conduit.sh b/scripts/build_conduit/build_conduit.sh index 0469c4d2e..5ecca5a7c 100755 --- a/scripts/build_conduit/build_conduit.sh +++ b/scripts/build_conduit/build_conduit.sh @@ -196,10 +196,9 @@ fi # build_zlib ################ # HDF5 ################ -# release 1-2 GAH!
-hdf5_version=1.14.1-2 -hdf5_middle_version=1.14.1 -hdf5_short_version=1.14 +hdf5_version=2.0.0 +hdf5_middle_version=2_0_0 +hdf5_short_version=2_0 hdf5_src_dir=$(ospath ${source_dir}/hdf5-${hdf5_version}) hdf5_build_dir=$(ospath ${build_dir}/hdf5-${hdf5_version}/) hdf5_install_dir=$(ospath ${install_dir}/hdf5-${hdf5_version}/) @@ -210,13 +209,14 @@ if [ ! -d ${hdf5_install_dir} ]; then if ${build_hdf5}; then if [ ! -d ${hdf5_src_dir} ]; then echo "**** Downloading ${hdf5_tarball}" - curl -L https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-${hdf5_short_version}/hdf5-${hdf5_middle_version}/src/hdf5-${hdf5_version}.tar.gz -o ${hdf5_tarball} + curl -L https://support.hdfgroup.org/releases/hdf5/v${hdf5_short_version}/v${hdf5_middle_version}/downloads/hdf5-${hdf5_version}.tar.gz -o ${hdf5_tarball} tar ${tar_extra_args} -xzf ${hdf5_tarball} -C ${source_dir} fi + ################# # -# hdf5 1.14.x CMake recipe for using zlib +# hdf5 CMake recipe for using zlib # # -DHDF5_ENABLE_Z_LIB_SUPPORT=ON # Add zlib install dir to CMAKE_PREFIX_PATH @@ -227,7 +227,7 @@ echo "**** Configuring HDF5 ${hdf5_version}" cmake -S ${hdf5_src_dir} -B ${hdf5_build_dir} ${cmake_compiler_settings} \ -DCMAKE_VERBOSE_MAKEFILE:BOOL=${enable_verbose} \ -DCMAKE_BUILD_TYPE=${build_config} \ - -DHDF5_ENABLE_Z_LIB_SUPPORT=ON \ + -DHDF5_ENABLE_ZLIB_SUPPORT:BOOL=ON \ -DCMAKE_PREFIX_PATH=${zlib_install_dir} \ -DCMAKE_INSTALL_PREFIX=${hdf5_install_dir} @@ -245,7 +245,7 @@ fi # build_hdf5 ################ # Silo ################ -silo_version=4.11.1 +silo_version=4.12.0 silo_src_dir=$(ospath ${source_dir}/Silo-${silo_version}) silo_build_dir=$(ospath ${build_dir}/silo-${silo_version}/) silo_install_dir=$(ospath ${install_dir}/silo-${silo_version}/) @@ -260,17 +260,8 @@ if [ ! -d ${silo_src_dir} ]; then # untar and avoid symlinks (which windows despises) tar ${tar_extra_args} -xzf ${silo_tarball} -C ${source_dir} \ --exclude="Silo-${silo_version}/config-site/*" \ - --exclude="Silo-${silo_version}/README.md" - # apply silo patches - cd ${silo_src_dir} - patch -p1 < ${script_dir}/2024_07_25_silo_4_11_cmake_fix.patch - - # windows specifc patch - if [[ "$build_windows" == "ON" ]]; then - patch -p1 < ${script_dir}/2024_07_29_silo-pr389-win32-bugfix.patch - fi - - cd ${root_dir} + --exclude="Silo-${silo_version}/LICENSE.md" \ + --exclude="Silo-${silo_version}/silo_objects.png" fi @@ -471,6 +462,11 @@ if [ ! 
-d ${h5zzfp_src_dir} ]; then echo "**** Downloading ${h5zzfp_tarball}" curl -L "https://github.com/LLNL/H5Z-ZFP/archive/refs/tags/v${h5zzfp_version}.tar.gz" -o ${h5zzfp_tarball} tar ${tar_extra_args} -xzf ${h5zzfp_tarball} -C ${source_dir} + + # apply patches + cd ${h5zzfp_src_dir} + patch -p1 < ${script_dir}/2025_12_08_h5zzfp-hdf5-cmake-fix.patch + cd ${root_dir} fi echo "**** Configuring H5Z-ZFP ${h5zzfp_version}" diff --git a/scripts/uberenv_configs/packages/hdf5/find_package_zlib.patch b/scripts/uberenv_configs/packages/hdf5/find_package_zlib.patch new file mode 100644 index 000000000..73b65e826 --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/find_package_zlib.patch @@ -0,0 +1,12 @@ +diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake +index dbd68fd110..3d06b13d57 100644 +--- a/CMakeFilters.cmake ++++ b/CMakeFilters.cmake +@@ -70,7 +70,6 @@ option (HDF5_ENABLE_Z_LIB_SUPPORT "Enable Zlib Filters" ON) + if (HDF5_ENABLE_Z_LIB_SUPPORT) + if (NOT H5_ZLIB_HEADER) + if (NOT ZLIB_USE_EXTERNAL) +- find_package (ZLIB NAMES ${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS static shared) + if (NOT ZLIB_FOUND) + find_package (ZLIB) # Legacy find + endif () diff --git a/scripts/uberenv_configs/packages/hdf5/fortran-kinds-2.patch b/scripts/uberenv_configs/packages/hdf5/fortran-kinds-2.patch new file mode 100644 index 000000000..caee52010 --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/fortran-kinds-2.patch @@ -0,0 +1,24 @@ +From 598df49b738fd99df9f2671e4e967fd9c33ae8a9 Mon Sep 17 00:00:00 2001 +From: Seth R Johnson +Date: Wed, 16 Feb 2022 20:38:03 -0500 +Subject: [PATCH] Close file to work around GCC11.2/macOS12 bug + +--- + m4/aclocal_fc.f90 | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/m4/aclocal_fc.f90 b/m4/aclocal_fc.f90 +index e9a11c0ab5..bfda49aa40 100644 +--- a/m4/aclocal_fc.f90 ++++ b/m4/aclocal_fc.f90 +@@ -151,6 +151,7 @@ PROGRAM FC_AVAIL_KINDS + WRITE(8,'(I0)') max_decimal_prec + WRITE(8,'(I0)') num_ikinds + WRITE(8,'(I0)') num_rkinds ++ CLOSE(8) + END PROGRAM FC_AVAIL_KINDS + !---- END ----- Determine the available KINDs for REALs and INTEGERs + +-- +2.32.0 + diff --git a/scripts/uberenv_configs/packages/hdf5/fortran-kinds.patch b/scripts/uberenv_configs/packages/hdf5/fortran-kinds.patch new file mode 100644 index 000000000..ffe5e6f3f --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/fortran-kinds.patch @@ -0,0 +1,20 @@ +--- a/config/cmake/HDF5UseFortran.cmake ++++ a/config/cmake/HDF5UseFortran.cmake +@@ -181,6 +181,7 @@ + WRITE(8,'(I0)') max_decimal_prec + WRITE(8,'(I0)') num_ikinds + WRITE(8,'(I0)') num_rkinds ++ CLOSE(8) + END PROGRAM FC_AVAIL_KINDS + " + ) +--- a/m4/aclocal_fc.f90 ++++ b/m4/aclocal_fc.f90 +@@ -151,6 +151,7 @@ + WRITE(8,'(I0)') max_decimal_prec + WRITE(8,'(I0)') num_ikinds + WRITE(8,'(I0)') num_rkinds ++ CLOSE(8) + END PROGRAM FC_AVAIL_KINDS + !---- END ----- Determine the available KINDs for REALs and INTEGERs + diff --git a/scripts/uberenv_configs/packages/hdf5/h5f90global-mult-obj-same-equivalence-same-common-block.patch b/scripts/uberenv_configs/packages/hdf5/h5f90global-mult-obj-same-equivalence-same-common-block.patch new file mode 100644 index 000000000..8bf5c142e --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/h5f90global-mult-obj-same-equivalence-same-common-block.patch @@ -0,0 +1,16 @@ +diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 +index dd2b171..629418a 100644 +--- a/fortran/src/H5f90global.F90 ++++ b/fortran/src/H5f90global.F90 +@@ -142,10 +142,7 @@ MODULE H5GLOBAL + + INTEGER(HID_T), 
DIMENSION(PREDEF_TYPES_LEN) :: predef_types + EQUIVALENCE (predef_types(1), H5T_NATIVE_INTEGER_KIND(1)) +- EQUIVALENCE (predef_types(2), H5T_NATIVE_INTEGER_KIND(2)) +- EQUIVALENCE (predef_types(3), H5T_NATIVE_INTEGER_KIND(3)) +- EQUIVALENCE (predef_types(4), H5T_NATIVE_INTEGER_KIND(4)) +- EQUIVALENCE (predef_types(5), H5T_NATIVE_INTEGER_KIND(5)) ++ ! EQUIVALENCE predef_types(2:5) are unnecessary and violate the standard + EQUIVALENCE (predef_types(6), H5T_NATIVE_INTEGER) + EQUIVALENCE (predef_types(7), H5T_NATIVE_REAL) + EQUIVALENCE (predef_types(8), H5T_NATIVE_DOUBLE) diff --git a/scripts/uberenv_configs/packages/hdf5/h5public-skip-mpicxx.patch b/scripts/uberenv_configs/packages/hdf5/h5public-skip-mpicxx.patch new file mode 100644 index 000000000..d60934552 --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/h5public-skip-mpicxx.patch @@ -0,0 +1,11 @@ +--- a/src/H5public.h 2019-08-28 18:51:39.393781356 -0400 ++++ b/src/H5public.h 2019-08-28 20:59:50.315181711 -0400 +@@ -57,6 +57,8 @@ + # include + #endif + #ifdef H5_HAVE_PARALLEL ++# define MPICH_SKIP_MPICXX 1 ++# define OMPI_SKIP_MPICXX 1 + # include <mpi.h> + #ifndef MPI_FILE_NULL /*MPIO may be defined in mpi.h already */ + # include <mpio.h> diff --git a/scripts/uberenv_configs/packages/hdf5/hdf5_1.8_gcc10.patch b/scripts/uberenv_configs/packages/hdf5/hdf5_1.8_gcc10.patch new file mode 100644 index 000000000..0de8c33c0 --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/hdf5_1.8_gcc10.patch @@ -0,0 +1,12 @@ +diff -Naur hdf5.orig/fortran/test/tH5T_F03.f90 hdf5/fortran/test/tH5T_F03.f90 +--- hdf5.orig/fortran/test/tH5T_F03.f90 2021-01-19 13:23:11.298000000 +0100 ++++ hdf5/fortran/test/tH5T_F03.f90 2021-01-19 13:19:17.637000000 +0100 +@@ -1541,7 +1541,7 @@ + INTEGER :: A, B, C, D + INTEGER :: Aw, Bw, Cw, Dw + INTEGER :: i, j +- INTEGER, PARAMETER :: hex = Z'00000003' ++ INTEGER, PARAMETER :: hex = INT(Z'00000003') + TYPE(C_PTR) :: f_ptr + INTEGER :: error ! Error flag + !
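The h5public-skip-mpicxx.patch above works because MPICH and Open MPI honor opt-out macros for their deprecated C++ bindings: defining them before mpi.h is included keeps mpi.h from pulling in the C++ binding headers, so C++ code that includes H5public.h picks up no link dependency on the MPI C++ library. A minimal standalone sketch of the same technique (not part of this PR): the two macro names are the documented MPICH/Open MPI knobs the patch itself uses, while the file name and compile command are illustrative.

/* skip_mpicxx_demo.c -- build with, e.g., `mpicxx skip_mpicxx_demo.c` */
#define MPICH_SKIP_MPICXX 1 /* MPICH and derivatives: do not include the C++ bindings */
#define OMPI_SKIP_MPICXX 1  /* Open MPI: do not include the C++ bindings */
#include <mpi.h>

int main(int argc, char **argv)
{
    /* Only the MPI C API is visible here; no libmpi_cxx link dependency is created. */
    MPI_Init(&argc, &argv);
    MPI_Finalize();
    return 0;
}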
diff --git a/scripts/uberenv_configs/packages/hdf5/hdf5_1_14_0_config_find_mpi.patch b/scripts/uberenv_configs/packages/hdf5/hdf5_1_14_0_config_find_mpi.patch new file mode 100644 index 000000000..49f7fc837 --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/hdf5_1_14_0_config_find_mpi.patch @@ -0,0 +1,13 @@ +diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in +index 35cee4f..b336377 100644 +--- a/config/cmake/hdf5-config.cmake.in ++++ b/config/cmake/hdf5-config.cmake.in +@@ -63,6 +63,8 @@ if (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL) + set (${HDF5_PACKAGE_NAME}_MPI_Fortran_INCLUDE_PATH "@MPI_Fortran_INCLUDE_DIRS@") + set (${HDF5_PACKAGE_NAME}_MPI_Fortran_LIBRARIES "@MPI_Fortran_LIBRARIES@") + endif () ++ ++ find_package(MPI QUIET REQUIRED) + endif () + + if (${HDF5_PACKAGE_NAME}_BUILD_JAVA) diff --git a/scripts/uberenv_configs/packages/hdf5/hdf5_1_14_3_fpe.patch b/scripts/uberenv_configs/packages/hdf5/hdf5_1_14_3_fpe.patch new file mode 100644 index 000000000..90ab32a0f --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/hdf5_1_14_3_fpe.patch @@ -0,0 +1,203 @@ +diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 +index 328f8d3cec..079f08d96c 100644 +--- a/config/linux-gnulibc1 ++++ b/config/linux-gnulibc1 +@@ -173,10 +173,7 @@ case $FC_BASENAME in + nagfor) + + F9XSUFFIXFLAG="" +- # NOTE: The default is -ieee=stop, which will cause problems +- # when the H5T module performs floating-point type +- # introspection +- AM_FCFLAGS="$AM_FCFLAGS -ieee=full" ++ AM_FCFLAGS="$AM_FCFLAGS" + FSEARCH_DIRS="" + + # Production +diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt +index 200576332b..0aa139761d 100644 +--- a/release_docs/RELEASE.txt ++++ b/release_docs/RELEASE.txt +@@ -246,6 +246,27 @@ Support for new platforms, languages and compilers + - + + ++Patches applied since the HDF5-1.14.3 release ++============================================= ++ Library ++ ------- ++ - Suppressed floating-point exceptions in H5T init code ++ ++ The floating-point datatype initialization code in H5Tinit_float.c ++ could raise FE_INVALID exceptions while munging bits and performing ++ comparisons that might involve NaN. This was not a problem when the ++ initialization code was executed in H5detect at compile time (prior ++ to 1.14.3), but now that the code is executed at library startup ++ (1.14.3+), these exceptions can be caught by user code, as is the ++ default in the NAG Fortran compiler. ++ ++ Starting in 1.14.4, we now suppress floating-point exceptions while ++ initializing the floating-point types and clear FE_INVALID before ++ restoring the original environment. ++ ++ Fixes GitHub #3831 ++ ++ + Bug Fixes since HDF5-1.14.2 release + =================================== + Library +@@ -619,12 +640,6 @@ Known Problems + this release with link errors. As a result, Windows binaries for this release + will not include Fortran. The problem will be addressed in HDF5 1.14.4. + +- IEEE standard arithmetic enables software to raise exceptions such as overflow, +- division by zero, and other illegal operations without interrupting or halting +- the program flow. The HDF5 C library intentionally performs these exceptions. +- Therefore, the "-ieee=full" nagfor switch is necessary when compiling a program +- to avoid stopping on an exception. +- + CMake files do not behave correctly with paths containing spaces. + Do not use spaces in paths because the required escaping for handling spaces + results in very complex and fragile build files. 
+diff --git a/src/H5Tinit_float.c b/src/H5Tinit_float.c +index 3b9e127fe4..02bb3bad77 100644 +--- a/src/H5Tinit_float.c ++++ b/src/H5Tinit_float.c +@@ -51,19 +51,23 @@ + * Function: DETECT_F + * + * Purpose: This macro takes a floating point type like `double' and +- * a base name like `natd' and detects byte order, mantissa +- * location, exponent location, sign bit location, presence or +- * absence of implicit mantissa bit, and exponent bias and +- * initializes a detected_t structure with those properties. ++ * detects byte order, mantissa location, exponent location, ++ * sign bit location, presence or absence of implicit mantissa ++ * bit, and exponent bias and initializes a detected_t structure ++ * with those properties. ++ * ++ * Note that these operations can raise floating-point ++ * exceptions and building with some compiler options ++ * (especially Fortran) can cause problems. + *------------------------------------------------------------------------- + */ +-#define DETECT_F(TYPE, VAR, INFO) \ ++#define DETECT_F(TYPE, INFO) \ + do { \ +- TYPE _v1, _v2, _v3; \ +- unsigned char _buf1[sizeof(TYPE)], _buf3[sizeof(TYPE)]; \ +- unsigned char _pad_mask[sizeof(TYPE)]; \ +- unsigned char _byte_mask; \ +- int _i, _j, _last = (-1); \ ++ TYPE _v1, _v2, _v3; \ ++ uint8_t _buf1[sizeof(TYPE)], _buf3[sizeof(TYPE)]; \ ++ uint8_t _pad_mask[sizeof(TYPE)]; \ ++ uint8_t _byte_mask; \ ++ int _i, _j, _last = -1; \ + \ + memset(&INFO, 0, sizeof(INFO)); \ + INFO.size = sizeof(TYPE); \ +@@ -81,7 +85,7 @@ + _v1 = (TYPE)4.0L; \ + H5MM_memcpy(_buf1, (const void *)&_v1, sizeof(TYPE)); \ + for (_i = 0; _i < (int)sizeof(TYPE); _i++) \ +- for (_byte_mask = (unsigned char)1; _byte_mask; _byte_mask = (unsigned char)(_byte_mask << 1)) { \ ++ for (_byte_mask = (uint8_t)1; _byte_mask; _byte_mask = (uint8_t)(_byte_mask << 1)) { \ + _buf1[_i] ^= _byte_mask; \ + H5MM_memcpy((void *)&_v2, (const void *)_buf1, sizeof(TYPE)); \ + H5_GCC_CLANG_DIAG_OFF("float-equal") \ +@@ -118,7 +122,7 @@ + _v1 = (TYPE)1.0L; \ + _v2 = (TYPE)-1.0L; \ + if (H5T__bit_cmp(sizeof(TYPE), INFO.perm, &_v1, &_v2, _pad_mask, &(INFO.sign)) < 0) \ +- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to detect byte order"); \ ++ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to determine sign bit"); \ + \ + /* Mantissa */ \ + INFO.mpos = 0; \ +@@ -126,12 +130,11 @@ + _v1 = (TYPE)1.0L; \ + _v2 = (TYPE)1.5L; \ + if (H5T__bit_cmp(sizeof(TYPE), INFO.perm, &_v1, &_v2, _pad_mask, &(INFO.msize)) < 0) \ +- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to detect byte order"); \ ++ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to determine mantissa"); \ + INFO.msize += 1 + (unsigned)(INFO.imp ? 0 : 1) - INFO.mpos; \ + \ + /* Exponent */ \ +- INFO.epos = INFO.mpos + INFO.msize; \ +- \ ++ INFO.epos = INFO.mpos + INFO.msize; \ + INFO.esize = INFO.sign - INFO.epos; \ + \ + _v1 = (TYPE)1.0L; \ +@@ -456,17 +459,24 @@ H5T__set_precision(H5T_fpoint_det_t *d) + herr_t H5_NO_UBSAN + H5T__init_native_float_types(void) + { ++ fenv_t saved_fenv; + H5T_fpoint_det_t det; + H5T_t *dt = NULL; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + ++ /* Turn off floating-point exceptions while initializing to avoid ++ * tripping over signaling NaNs while looking at "don't care" bits. 
++ */ ++ if (feholdexcept(&saved_fenv) != 0) ++ HSYS_GOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't save floating-point environment"); ++ + /* H5T_NATIVE_FLOAT */ + + /* Get the type's characteristics */ + memset(&det, 0, sizeof(H5T_fpoint_det_t)); +- DETECT_F(float, FLOAT, det); ++ DETECT_F(float, det); + + /* Allocate and fill type structure */ + if (NULL == (dt = H5T__alloc())) +@@ -497,7 +507,7 @@ H5T__init_native_float_types(void) + + /* Get the type's characteristics */ + memset(&det, 0, sizeof(H5T_fpoint_det_t)); +- DETECT_F(double, DOUBLE, det); ++ DETECT_F(double, det); + + /* Allocate and fill type structure */ + if (NULL == (dt = H5T__alloc())) +@@ -528,7 +538,7 @@ H5T__init_native_float_types(void) + + /* Get the type's characteristics */ + memset(&det, 0, sizeof(H5T_fpoint_det_t)); +- DETECT_F(long double, LDOUBLE, det); ++ DETECT_F(long double, det); + + /* Allocate and fill type structure */ + if (NULL == (dt = H5T__alloc())) +@@ -561,6 +571,14 @@ H5T__init_native_float_types(void) + H5T_native_order_g = det.order; + + done: ++ /* Clear any FE_INVALID exceptions from NaN handling */ ++ if (feclearexcept(FE_INVALID) != 0) ++ HSYS_GOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't clear floating-point exceptions"); ++ ++ /* Restore the original environment */ ++ if (feupdateenv(&saved_fenv) != 0) ++ HSYS_GOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't restore floating-point environment"); ++ + if (ret_value < 0) { + if (dt != NULL) { + dt->shared = H5FL_FREE(H5T_shared_t, dt->shared); +diff --git a/src/H5private.h b/src/H5private.h +index 14a0ac3225..3aaa0d5245 100644 +--- a/src/H5private.h ++++ b/src/H5private.h +@@ -26,6 +26,7 @@ + #include <assert.h> + #include <ctype.h> + #include <fcntl.h> ++#include <fenv.h> + #include <float.h> + #include <math.h> + #include <setjmp.h> diff --git a/scripts/uberenv_configs/packages/hdf5/package.py b/scripts/uberenv_configs/packages/hdf5/package.py new file mode 100644 index 000000000..0adac87dd --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/package.py @@ -0,0 +1,801 @@ +# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +# Spack Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +import os +import re +import shutil +import sys + +import llnl.util.lang +import llnl.util.tty as tty + +from spack.package import * + + +class Hdf5(CMakePackage): + """HDF5 is a data model, library, and file format for storing and managing + data. It supports an unlimited variety of datatypes, and is designed for + flexible and efficient I/O and for high volume and complex data. + """ + + homepage = "https://support.hdfgroup.org" + url = "https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_5/downloads/hdf5-1.14.5.tar.gz" + + git = "https://github.com/HDFGroup/hdf5.git" + maintainers("lrknox", "brtnfld", "byrnHDF", "gheber", "hyoklee", "lkurz") + + tags = ["e4s", "windows"] + executables = ["^h5cc$", "^h5pcc$"] + + test_requires_compiler = True + + license("custom") + + depends_on("cxx", type="build", when="+cxx") + depends_on("fortran", type="build", when="+fortran") + + # The 'develop' version is renamed so that we could uninstall (or patch) it + # without affecting other develop versions.
+ version("develop-2.0", branch="develop") + version("develop-1.14", branch="hdf5_1_14") + version("develop-1.12", branch="hdf5_1_12") + version("develop-1.10", branch="hdf5_1_10") + version("develop-1.8", branch="hdf5_1_8") + + # Odd versions are considered experimental releases + # Even versions are maintenance versions + version( + "2.0.0", + sha256="6e45a4213cb11bb5860e1b0a7645688ab55562cc2d55c6ff9bcb0984ed12b22b", + url="https://support.hdfgroup.org/releases/hdf5/v2_0/v2_0_0/downloads/hdf5-2.0.0.tar.gz", + preferred=True, + ) + version( + "1.14.5", + sha256="ec2e13c52e60f9a01491bb3158cb3778c985697131fc6a342262d32a26e58e44", + url="https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_5/downloads/hdf5-1.14.5.tar.gz" + ) + version( + "1.14.4-3", + sha256="019ac451d9e1cf89c0482ba2a06f07a46166caf23f60fea5ef3c37724a318e03", + url="https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_4/downloads/hdf5-1.14.4-3.tar.gz", + ) + version( + "1.14.3", + sha256="09cdb287aa7a89148c1638dd20891fdbae08102cf433ef128fd345338aa237c7", + url="https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_3/downloads/hdf5-1.14.3.tar.gz", + ) + version( + "1.14.2", + sha256="1c342e634008284a8c2794c8e7608e2eaf26d01d445fb3dfd7f33cb2fb51ac53", + url="https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_2/downloads/hdf5-1.14.2.tar.gz", + ) + version( + "1.14.1-2", + sha256="cbe93f275d5231df28ced9549253793e40cd2b555e3d288df09d7b89a9967b07", + url="https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_1/downloads/hdf5-1.14.1-2.tar.gz", + ) + version( + "1.14.0", + sha256="a571cc83efda62e1a51a0a912dd916d01895801c5025af91669484a1575a6ef4", + url="https://support.hdfgroup.org/releases/hdf5/v1_14/v1_14_0/downloads/hdf5-1.14.0.tar.gz", + ) + version("1.12.3", sha256="c15adf34647918dd48150ea1bd9dffd3b32a3aec5298991d56048cc3d39b4f6f") + version("1.12.2", sha256="2a89af03d56ce7502dcae18232c241281ad1773561ec00c0f0e8ee2463910f14") + version("1.12.1", sha256="79c66ff67e666665369396e9c90b32e238e501f345afd2234186bfb8331081ca") + version("1.12.0", sha256="a62dcb276658cb78e6795dd29bf926ed7a9bc4edf6e77025cd2c689a8f97c17a") + version("1.10.11", sha256="341684c5c0976b8c7e6951735a400275a90693604464cac73e9f323c696fc79c") + version("1.10.10", sha256="a6877ab7bd5d769d2d68618fdb54beb50263dcc2a8c157fe7e2186925cdb02db") + version("1.10.9", sha256="f5b77f59b705a755a5a223372d0222c7bc408fe8db6fa8d9d7ecf8bce291b8dd") + version("1.10.8", sha256="d341b80d380dd763753a0ebe22915e11e87aac4e44a084a850646ff934d19c80") + version("1.10.7", sha256="7a1a0a54371275ce2dfc5cd093775bb025c365846512961e7e5ceaecb437ef15") + version("1.10.6", sha256="5f9a3ee85db4ea1d3b1fa9159352aebc2af72732fc2f58c96a3f0768dba0e9aa") + version("1.10.5", sha256="6d4ce8bf902a97b050f6f491f4268634e252a63dadd6656a1a9be5b7b7726fa8") + version("1.10.4", sha256="8f60dc4dd6ab5fcd23c750d1dc5bca3d0453bdce5c8cdaf0a4a61a9d1122adb2") + version("1.10.3", sha256="b600d7c914cfa80ae127cd1a1539981213fee9994ac22ebec9e3845e951d9b39") + version("1.10.2", sha256="bfec1be8c366965a99812cf02ddc97e4b708c1754fccba5414d4adccdc073866") + version("1.10.1", sha256="048a9d149fb99aaa1680a712963f5a78e9c43b588d0e79d55e06760ec377c172") + version( + "1.10.0-patch1", sha256="6e78cfe32a10e6e0629393cdfddf6cfa536571efdaf85f08e35326e1b4e9eff0" + ) + version("1.10.0", sha256="81f6201aba5c30dced5dcd62f5d5477a2790fd5850e02ac514ca8bf3e2bb375a") + version("1.8.23", sha256="37fa4eb6cd0e181eb49a10d54611cb00700e9537f805d03e6853503afe5abc27") + version("1.8.22", 
sha256="8406d96d9355ef8961d2739fb8fd5474ad4cdf52f3cfac657733defd9709bfaa") + version("1.8.21", sha256="87d8c82eba5cf766d97cd06c054f4639c1049c4adeaa3a79f77f8bd374f80f37") + version("1.8.19", sha256="a4335849f19fae88c264fd0df046bc321a78c536b2548fc508627a790564dc38") + version("1.8.18", sha256="cdb195ad8d9e6782acf24b2488061289f615628c2ccda8457b0a0c3fb7a8a063") + version("1.8.17", sha256="d9cda297ee76ade9881c4208987939250d397bae6252d0ccb66fa7d24d67e263") + version("1.8.16", sha256="ed17178abd9928a7237f30370189ba767b9e39e0db45917c2ac4665eb9cb4771") + version("1.8.15", sha256="4e963216b7d32469596bc1321a8c3f6e0c278dcbbdb7be6414c63c081b34c275") + version("1.8.14", sha256="1dbefeeef7f591897c632b2b090db96bb8d35ad035beaa36bc39cb2bc67e0639") + version("1.8.13", sha256="82f6b38eec103b4fccfbf14892786e0c27a8135d3252d8601cf5bf20066d38c1") + version("1.8.12", sha256="b5cccea850096962b5fd9e96f22c4f47d2379224bb41130d9bc038bb6c37dfcb") + version("1.8.10", sha256="4813b79c5fb8701a625b9924b8203bc7154a77f9b826ad4e034144b4056a160a") + + depends_on("c", type="build") # generated + depends_on("cxx", type="build") # generated + depends_on("fortran", type="build") # generated + + variant("shared", default=True, description="Builds a shared version of the library") + + variant("hl", default=False, description="Enable the high-level library") + variant("cxx", default=False, description="Enable C++ support") + variant("map", when="@1.14:", default=False, description="Enable MAP API support") + variant( + "subfiling", when="@1.14: +mpi", default=False, description="Enable Subfiling VFD support" + ) + variant("fortran", default=False, description="Enable Fortran support") + variant("java", when="@1.10:", default=False, description="Enable Java support") + variant("threadsafe", default=False, description="Enable thread-safe capabilities") + variant("tools", default=True, description="Enable building tools") + variant("mpi", default=True, description="Enable MPI support") + variant("szip", default=False, description="Enable szip support") + # Build HDF5 with API compatibility. + variant( + "api", + default="default", + description="Choose api compatibility for earlier version", + values=("default", "v116", "v114", "v112", "v110", "v18", "v16"), + multi=False, + ) + + depends_on("cmake@3.12:", type="build") + depends_on("cmake@3.18:", type="build", when="@1.13:") + + with when("+mpi"): + depends_on("mpi") + depends_on("mpich+fortran", when="+fortran ^[virtuals=mpi] mpich") + + depends_on("java", type=("build", "run"), when="+java") + depends_on("szip", when="+szip") + + depends_on("zlib-api") + # See https://github.com/HDFGroup/hdf5/pull/4147 + depends_on( + "zlib-ng~new_strategies", + when="@:1.14.3,develop-1.8:develop-1.12 ^[virtuals=zlib-api] zlib-ng", + ) + + # The compiler wrappers (h5cc, h5fc, etc.) run 'pkg-config'. 
+ # Skip this on Windows since pkgconfig is autotools + for plat in ["darwin", "linux"]: + depends_on("pkgconfig", when=f"platform={plat}", type="run") + + conflicts("+mpi", "^mpich@4.0:4.0.3") + conflicts("api=v116", when="@1.6:1.14", msg="v116 is not compatible with this release") + conflicts( + "api=v116", + when="@develop-1.8:develop-1.14", + msg="v116 is not compatible with this release", + ) + conflicts("api=v114", when="@1.6:1.12", msg="v114 is not compatible with this release") + conflicts( + "api=v114", + when="@develop-1.8:develop-1.12", + msg="v114 is not compatible with this release", + ) + conflicts("api=v112", when="@1.6:1.10", msg="v112 is not compatible with this release") + conflicts( + "api=v112", + when="@develop-1.8:develop-1.10", + msg="v112 is not compatible with this release", + ) + conflicts("api=v110", when="@1.6:1.8", msg="v110 is not compatible with this release") + conflicts("api=v110", when="@develop-1.8", msg="v110 is not compatible with this release") + conflicts("api=v18", when="@1.6", msg="v18 is not compatible with this release") + + # The Java wrappers cannot be built without shared libs. + conflicts("+java", when="~shared") + # Fortran fails built with shared for old HDF5 versions + conflicts("+fortran", when="+shared@:1.8.15") + # See https://github.com/spack/spack/issues/31085 + conflicts("+fortran+mpi", when="@1.8.22") + # See https://github.com/HDFGroup/hdf5/issues/2906#issue-1697749645 + conflicts( + "+fortran", when="@1.13.3:^cmake@:3.22", msg="cmake_minimum_required is not set correctly." + ) + + # HDF5 searches for zlib CMake config files before it falls back to + # FindZLIB.cmake. We don't build zlib with CMake by default, so have to + # delete the first search, otherwise it may find a system zlib. See + # https://github.com/HDFGroup/hdf5/issues/4904 + patch("find_package_zlib.patch", when="@1.8.16:1.14.4") + + # There are several officially unsupported combinations of the features: + # 1. Thread safety is not guaranteed via high-level C-API but in some cases + # it works. + # conflicts('+threadsafe+hl') + + # 2. Thread safety is not guaranteed via Fortran (CXX) API, but it's + # possible for a dependency tree to contain a package that uses Fortran + # (CXX) API in a single thread and another one that uses low-level C-API + # in multiple threads. To allow for such scenarios, we don't specify the + # following conflicts. + # conflicts('+threadsafe+cxx') + # conflicts('+threadsafe+fortran') + + # 3. Parallel features are not supported via CXX API, but for the reasons + # described in #2 we allow for such combination. + # conflicts('+mpi+cxx') + + # Patch needed for HDF5 1.14.3 to fix signaling FPE checks from triggering + # at dynamic type system initialization. The type system's builtin types + # were refactored in 1.14.3 and switched from compile-time to run-time + # initialization. This patch suppresses floating point exception checks + # that would otherwise be triggered by this code. Later HDF5 versions + # will include the patch code changes. + # See https://github.com/HDFGroup/hdf5/pull/3837 + patch("hdf5_1_14_3_fpe.patch", when="@1.14.3") + + # There are known build failures with intel@18.0.1. This issue is + # discussed and patch is provided at + # https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/747951. + patch("h5f90global-mult-obj-same-equivalence-same-common-block.patch", when="@1.10.1%intel@18") + + # Turn line comments into block comments to conform with pre-C99 language + # standards. 
Versions of hdf5 after 1.8.10 don't require this patch, + # either because they conform to pre-C99 or neglect to ask for pre-C99 + # language standards from their compiler. The hdf5 build system adds + # the -ansi cflag (run 'man gcc' for info on -ansi) for some versions + # of some compilers (see hdf5-1.8.10/config/gnu-flags). The hdf5 build + # system does not provide an option to disable -ansi, but since the + # pre-C99 code is restricted to just five lines of line comments in + # three src files, this patch accomplishes the simple task of patching the + # three src files and leaves the hdf5 build system alone. + patch("pre-c99-comments.patch", when="@1.8.10") + + # There are build errors with GCC 8, see + # https://forum.hdfgroup.org/t/1-10-2-h5detect-compile-error-gcc-8-1-0-on-centos-7-2-solved/4441 + patch( + "https://salsa.debian.org/debian-gis-team/hdf5/raw/bf94804af5f80f662cad80a5527535b3c6537df6/debian/patches/gcc-8.patch", + sha256="57cee5ff1992b4098eda079815c36fc2da9b10e00a9056df054f2384c4fc7523", + when="@1.10.2%gcc@8:", + ) + + # Disable MPI C++ interface when C++ is disabled, otherwise downstream + # libraries fail to link; see https://github.com/spack/spack/issues/12586 + patch( + "h5public-skip-mpicxx.patch", + when="@1.8.10:1.8.21,1.10.0:1.10.5+mpi~cxx", + sha256="b61e2f058964ad85be6ee5ecea10080bf79e73f83ff88d1fa4b602d00209da9c", + ) + + # Fixes BOZ literal constant error when compiled with GCC 10. + # The issue is described here: https://github.com/spack/spack/issues/18625 + patch( + "hdf5_1.8_gcc10.patch", + when="@:1.8.21", + sha256="0e20187cda3980a4fdff410da92358b63de7ebef2df1d7a425371af78e50f666", + ) + + patch("fortran-kinds.patch", when="@1.10.7") + + # This patch may only be needed with GCC 11.2 on macOS, but it's valid for + # any of the head HDF5 versions as of 12/2021. Since it's impossible to + # tell what Fortran version is part of a mixed apple-clang toolchain on + # macOS (which is the norm), and this might be an issue for other compilers + # as well, we just apply it to all platforms. + # See https://github.com/HDFGroup/hdf5/issues/1157 + patch("fortran-kinds-2.patch", when="@1.10.8,1.12.1") + + # Patch needed for HDF5 1.14.0 where dependency on MPI::MPI_C was declared + # PUBLIC. Dependent packages using the default hdf5 package but not + # expecting to use MPI then failed to configure because they did not call + # find_package(MPI). This patch does that for them. Later HDF5 versions + # will include the patch code changes. + patch("hdf5_1_14_0_config_find_mpi.patch", when="@1.14.0") + + # The argument 'buf_size' of the C function 'h5fget_file_image_c' is + # declared as intent(in) though it is modified by the invocation. As a + # result, aggressive compilers such as Fujitsu's may do a wrong + # optimization to cause an error. + def patch(self): + filter_file( + "INTEGER(SIZE_T), INTENT(IN) :: buf_size", + "INTEGER(SIZE_T), INTENT(OUT) :: buf_size", + "fortran/src/H5Fff.F90", + string=True, + ignore_absent=True, + ) + filter_file( + "INTEGER(SIZE_T), INTENT(IN) :: buf_size", + "INTEGER(SIZE_T), INTENT(OUT) :: buf_size", + "fortran/src/H5Fff_F03.f90", + string=True, + ignore_absent=True, + ) + if self.run_tests: + # hdf5 has ~2200 CPU-intensive tests, some of them have races: + # Often, these loop endlessly (at least on one Xeon and one EPYC). + # testphdf5 fails non-deterministically.
Removing these tests lets the suite finish + filter_file( + "REMOVE_ITEM H5P_TESTS", + "REMOVE_ITEM H5P_TESTS t_bigio t_shapesame testphdf5", + "testpar/CMakeTests.cmake", + ) + + # The parallel compiler wrappers (i.e. h5pcc, h5pfc, etc.) reference MPI + # compiler wrappers and do not need to be changed. + # These do not exist on Windows. + # Enable only for supported target platforms. + + if sys.platform != "win32": + filter_compiler_wrappers( + "h5cc", "h5hlcc", "h5fc", "h5hlfc", "h5c++", "h5hlc++", relative_root="bin" + ) + + def url_for_version(self, version): + url = "https://support.hdfgroup.org/archive/support/ftp/HDF5/releases/hdf5-{0}/hdf5-{1}/src/hdf5-{1}.tar.gz" + return url.format(version.up_to(2), version) + + def flag_handler(self, name, flags): + spec = self.spec + cmake_flags = [] + + if name == "cflags": + if ( + spec.satisfies("%gcc") + or spec.satisfies("%clang") + or spec.satisfies("%apple-clang") + or spec.satisfies("%oneapi") + ): + # Quiet warnings/errors about implicit declaration of functions + # in C99: + cmake_flags.append("-Wno-error=implicit-function-declaration") + # Note that this flag will cause an error if building %nvhpc. + if spec.satisfies("@:1.8.12~shared"): + # More recent versions set CMAKE_POSITION_INDEPENDENT_CODE to + # True and build with PIC flags. + cmake_flags.append(self.compiler.cc_pic_flag) + if spec.satisfies("@1.8.21 %oneapi@2023.0.0"): + cmake_flags.append("-Wno-error=int-conversion") + elif name == "cxxflags": + if spec.satisfies("@:1.8.12+cxx~shared"): + cmake_flags.append(self.compiler.cxx_pic_flag) + elif name == "fflags": + if spec.satisfies("%cce+fortran"): + # Cray compiler generates module files with uppercase names by + # default, which is not handled by the CMake scripts. The + # following flag forces the compiler to produce module files + # with lowercase names. + cmake_flags.append("-ef") + if spec.satisfies("@:1.8.12+fortran~shared"): + cmake_flags.append(self.compiler.fc_pic_flag) + elif name == "ldlibs": + if spec.satisfies("+fortran %fj"): + cmake_flags.extend(["-lfj90i", "-lfj90f", "-lfjsrcinfo", "-lelf"]) + + return flags, None, (cmake_flags or None) + + @property + def libs(self): + """HDF5 can be queried for the following parameters: + + - "hl": high-level interface + - "cxx": C++ APIs + - "fortran": Fortran APIs + - "java": Java APIs + + :return: list of matching libraries + """ + query_parameters = self.spec.last_query.extra_parameters + + shared = self.spec.satisfies("+shared") + + # This map contains a translation from query_parameters + # to the libraries needed + query2libraries = { + tuple(): ["libhdf5"], + ("cxx", "fortran", "hl", "java"): [ + # When installed with Autotools, the basename of the real + # library file implementing the High-level Fortran interface is + # 'libhdf5hl_fortran'. Starting versions 1.8.22, 1.10.5 and + # 1.12.0, the Autotools installation also produces a symbolic + # link 'libhdf5_hl_fortran.<so/a>' to + # 'libhdf5hl_fortran.<so/a>'. Note that in the case of the + # dynamic library, the latter is a symlink to the real sonamed + # file 'libhdf5_fortran.so.<version>'. This means that all + # dynamically linked executables/libraries of the dependent + # packages need 'libhdf5_fortran.so.<version>' with the same + # DT_SONAME entry. However, the CMake installation (at least + # starting version 1.8.10) does not produce it. Instead, the + # basename of the library file is 'libhdf5_hl_fortran'. Which + # means that switching to CMake requires rebuilding of all + # dependent packages that use the High-level Fortran interface.
+ # Therefore, we do not try to preserve backward compatibility + # with Autotools installations by creating symlinks. The only + # packages that could benefit from it would be those that + # hardcode the library name in their building systems. Such + # packages should simply be patched. + "libhdf5_hl_fortran", + "libhdf5_hl_f90cstub", + "libhdf5_hl_cpp", + "libhdf5_hl", + "libhdf5_fortran", + "libhdf5_f90cstub", + "libhdf5_java", + "libhdf5", + ], + ("cxx", "hl"): ["libhdf5_hl_cpp", "libhdf5_hl", "libhdf5"], + ("fortran", "hl"): [ + "libhdf5_hl_fortran", + "libhdf5_hl_f90cstub", + "libhdf5_hl", + "libhdf5_fortran", + "libhdf5_f90cstub", + "libhdf5", + ], + ("hl",): ["libhdf5_hl", "libhdf5"], + ("cxx", "fortran"): ["libhdf5_fortran", "libhdf5_f90cstub", "libhdf5_cpp", "libhdf5"], + ("cxx",): ["libhdf5_cpp", "libhdf5"], + ("fortran",): ["libhdf5_fortran", "libhdf5_f90cstub", "libhdf5"], + ("java",): ["libhdf5_java", "libhdf5"], + } + + # Turn the query into the appropriate key + key = tuple(sorted(query_parameters)) + libraries = query2libraries[key] + + return find_libraries(libraries, root=self.prefix, shared=shared, recursive=True) + + @classmethod + def determine_version(cls, exe): + output = Executable(exe)("-showconfig", output=str, error=str) + match = re.search(r"HDF5 Version: (\d+\.\d+\.\d+)(\D*\S*)", output) + return match.group(1) if match else None + + @classmethod + def determine_variants(cls, exes, version): + def is_enabled(text): + return text.lower() in ["t", "true", "enabled", "yes", "1", "on"] + + results = [] + for exe in exes: + variants = [] + output = Executable(exe)("-showconfig", output=str, error=os.devnull) + match = re.search(r"High-level library: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+hl") + else: + variants.append("~hl") + + match = re.search(r"Parallel HDF5: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+mpi") + else: + variants.append("~mpi") + + match = re.search(r"C\+\+: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+cxx") + else: + variants.append("~cxx") + + match = re.search(r"Fortran: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+fortran") + else: + variants.append("~fortran") + + match = re.search(r"Java: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+java") + else: + variants.append("~java") + + match = re.search(r"Threadsafety: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+threadsafe") + else: + variants.append("~threadsafe") + + match = re.search(r"Build HDF5 Tools: (\S+)", output) + if match and is_enabled(match.group(1)): + variants.append("+tools") + else: + variants.append("~tools") + + match = re.search(r"I/O filters \(external\): \S*(szip\(encoder\))\S*", output) + if match: + variants.append("+szip") + else: + variants.append("~szip") + + match = re.search(r"Default API mapping: (\S+)", output) + if match and match.group(1) in set(["v114", "v112", "v110", "v18", "v16"]): + variants.append("api={0}".format(match.group(1))) + + results.append(" ".join(variants)) + + return results + + @when("@:1.8.21,1.10.0:1.10.5+szip") + def setup_build_environment(self, env): + env.set("SZIP_INSTALL", self.spec["szip"].prefix) + + def setup_run_environment(self, env): + # According to related github posts and problems running test_install + # as a stand-alone test, it appears the lib path must be added to + # LD_LIBRARY_PATH. 
+ env.append_path("LD_LIBRARY_PATH", self.prefix.lib) + + @run_before("cmake") + def fortran_check(self): + if self.spec.satisfies("+fortran") and not self.compiler.fc: + msg = "cannot build a Fortran variant without a Fortran compiler" + raise RuntimeError(msg) + + def cmake_args(self): + spec = self.spec + + if spec.satisfies("@:1.8.15+shared"): + tty.warn("hdf5@:1.8.15+shared does not produce static libraries") + + args = [ + # Always enable this option. This does not actually enable any + # features: it only *allows* the user to specify certain + # combinations of other arguments. + self.define("ALLOW_UNSUPPORTED", True), + # Speed-up the building by skipping the examples: + self.define("HDF5_BUILD_EXAMPLES", False), + self.define( + "BUILD_TESTING", + self.run_tests or + # Version 1.8.22 fails to build the tools when shared libraries + # are enabled but the tests are disabled. + spec.satisfies("@1.8.22+shared+tools"), + ), + self.define_from_variant("HDF5_ENABLE_SUBFILING_VFD", "subfiling"), + self.define_from_variant("HDF5_ENABLE_MAP_API", "map"), + self.define("HDF5_ENABLE_Z_LIB_SUPPORT", True), + self.define_from_variant("HDF5_ENABLE_SZIP_SUPPORT", "szip"), + self.define_from_variant("HDF5_ENABLE_SZIP_ENCODING", "szip"), + self.define_from_variant("BUILD_SHARED_LIBS", "shared"), + self.define("ONLY_SHARED_LIBS", False), + self.define_from_variant("HDF5_ENABLE_PARALLEL", "mpi"), + self.define_from_variant("HDF5_ENABLE_THREADSAFE", "threadsafe"), + self.define_from_variant("HDF5_BUILD_HL_LIB", "hl"), + self.define_from_variant("HDF5_BUILD_CPP_LIB", "cxx"), + self.define_from_variant("HDF5_BUILD_FORTRAN", "fortran"), + self.define_from_variant("HDF5_BUILD_JAVA", "java"), + self.define_from_variant("HDF5_BUILD_TOOLS", "tools"), + ] + + api = spec.variants["api"].value + if api != "default": + args.append(self.define("DEFAULT_API_VERSION", api)) + + # MSMPI does not provide compiler wrappers + # and pointing these variables at the MSVC compilers + # breaks CMake's mpi detection for MSMPI. + if spec.satisfies("+mpi") and "msmpi" not in spec: + args.extend( + [ + "-DMPI_CXX_COMPILER:PATH=%s" % spec["mpi"].mpicxx, + "-DMPI_C_COMPILER:PATH=%s" % spec["mpi"].mpicc, + ] + ) + + if spec.satisfies("+fortran"): + args.extend(["-DMPI_Fortran_COMPILER:PATH=%s" % spec["mpi"].mpifc]) + + # work-around for https://github.com/HDFGroup/hdf5/issues/1320 + if spec.satisfies("@1.10.8,1.13.0"): + args.append(self.define("HDF5_INSTALL_CMAKE_DIR", "share/cmake/hdf5")) + + # AOCC does not support _Float16 + if spec.satisfies("@1.14.4: %aocc"): + args.append(self.define("HDF5_ENABLE_NONSTANDARD_FEATURE_FLOAT16", False)) + + return args + + @run_after("install") + def ensure_parallel_compiler_wrappers(self): + # When installed with Autotools and starting at least version 1.8.10, + # the package produces C compiler wrapper called either 'h5cc' (when MPI + # support is disabled) or 'h5pcc' (when MPI support is enabled). The + # CMake installation produces the wrapper called 'h5cc' (regardless of + # whether MPI support is enabled) only starting versions 1.8.21, 1.10.2 + # and 1.12.0. The current develop versions also produce 'h5pcc' when MPI + # support is enabled and the file is identical to 'h5cc'. Here, we make + # sure that 'h5pcc' is available when MPI support is enabled (only for + # versions that generate 'h5cc'). 
+ if self.spec.satisfies("@1.8.21:1.8.22,1.10.2:1.10.7,1.12.0+mpi"): + with working_dir(self.prefix.bin): + # No try/except here, fix the condition above instead: + symlink("h5cc", "h5pcc") + + # The same as for 'h5pcc'. However, the CMake installation produces the + # Fortran compiler wrapper called 'h5fc' only starting versions 1.8.22, + # 1.10.6 and 1.12.0. The current develop versions do not produce 'h5pfc' + # at all. Here, we make sure that 'h5pfc' is available when Fortran and + # MPI support are enabled (only for versions that generate 'h5fc'). + if self.spec.satisfies("@1.8.22:1.8," "1.10.6:1.10.9," "1.12.0:1.12.2" "+fortran+mpi"): + with working_dir(self.prefix.bin): + # No try/except here, fix the condition above instead: + symlink("h5fc", "h5pfc") + + @run_after("install") + def fix_package_config(self): + # We need to fix the pkg-config files, which are also used by the + # compiler wrappers. The files are created starting versions 1.8.21, + # 1.10.2 and 1.12.0. However, they are broken (except for the version + # 1.8.22): the files are named <name>-<version>.pc but reference + # <name> packages. This was fixed in the develop versions at some point: the + # files started referencing <name>-<version> packages but got broken + # again: the files got names <name>.pc but references had not been + # updated accordingly. Another issue, which we address here, is that + # some Linux distributions install pkg-config files named hdf5.pc and we + # want to override them. Therefore, the following solution makes sure + # that each <name>-<version>.pc file is symlinked by <name>.pc and all + # references to <name>-<version> packages in the original files are + # replaced with references to <name> packages. + pc_files = find(self.prefix.lib.pkgconfig, "hdf5*.pc", recursive=False) + + if not pc_files: + # This also tells us that the pkgconfig directory does not exist. + return + + # Replace versioned references in all pkg-config files: + filter_file( + r"(Requires(?:\.private)?:.*)(hdf5[^\s,]*)(?:-[^\s,]*)(.*)", + r"\1\2\3", + *pc_files, + backup=False, + ) + + # Create non-versioned symlinks to the versioned pkg-config files: + with working_dir(self.prefix.lib.pkgconfig): + for f in pc_files: + src_filename = os.path.basename(f) + version_sep_idx = src_filename.find("-") + if version_sep_idx > -1: + tgt_filename = src_filename[:version_sep_idx] + ".pc" + if not os.path.exists(tgt_filename): + symlink(src_filename, tgt_filename) + + @run_after("install") + def link_debug_libs(self): + # When build_type is Debug, the hdf5 build appends _debug to all library names. + # Dependents of hdf5 (netcdf-c etc.) can't handle those, thus make symlinks. + if self.spec.satisfies("build_type=Debug"): + libs = find(self.prefix.lib, "libhdf5*_debug.*", recursive=False) + with working_dir(self.prefix.lib): + for lib in libs: + libname = os.path.split(lib)[1] + os.symlink(libname, libname.replace("_debug", "")) + + @run_after("install") + def symlink_to_h5hl_wrappers(self): + if self.spec.satisfies("+hl"): + with working_dir(self.prefix.bin): + # CMake's FindHDF5 relies only on h5cc so it doesn't find the HL + # component unless it uses h5hlcc so we symlink h5cc to h5hlcc etc + symlink_files = {"h5cc": "h5hlcc", "h5c++": "h5hlc++"} + for old, new in symlink_files.items(): + if os.path.isfile(old): + os.remove(old) + symlink(new, old) + + @property + @llnl.util.lang.memoized + def _output_version(self): + spec_vers_str = str(self.spec.version.up_to(3)) + if "develop" in spec_vers_str: + # Remove 'develop-' from the version in spack for checking + # version against the version in the HDF5 code.
+ spec_vers_str = spec_vers_str.partition("-")[2] + return spec_vers_str + + @run_after("install") + @on_package_attributes(run_tests=True) + def check_install(self): + self.test_check_prog() + + def test_check_prog(self): + """build, run and check output of check.c""" + print("Checking HDF5 installation...") + prog = "check.c" + + spec = self.spec + checkdir = "spack-check" + + # Because the release number in a develop branch is not fixed, + # only the major and minor version numbers are compared. + # Otherwise all 3 numbers are checked. + fmt = "%d.%d %u.%u" + arg_line1 = "H5_VERS_MAJOR, H5_VERS_MINOR" + arg_line2 = "majnum, minnum" + if not spec.version.isdevelop(): + fmt = "%d." + fmt + ".%u" + arg_line2 = "H5_VERS_RELEASE, " + arg_line2 + ", relnum" + + source = r""" +#include <hdf5.h> +#include <assert.h> +#include <stdio.h> +int main(int argc, char **argv) {{ + unsigned majnum, minnum, relnum; + herr_t herr = H5get_libversion(&majnum, &minnum, &relnum); + assert(!herr); + printf("HDF5 version {}\n", {}, + {}); + return 0; +}} +""" + + expected = f"HDF5 version {self._output_version} {self._output_version}\n" + + with working_dir(checkdir, create=True): + with open(prog, "w") as f: + f.write(source.format(fmt, arg_line1, arg_line2)) + + cc = Executable(os.environ["CC"]) + cc(*(["-c", "check.c"] + spec["hdf5"].headers.cpp_flags.split())) + cc(*(["-o", "check", "check.o"] + spec["hdf5"].libs.ld_flags.split())) + try: + check = Executable("./check") + output = check(output=str) + except ProcessError: + output = "" + success = output == expected + if not success: + print("Produced output does not match expected output.") + print("Expected output:") + print("-" * 80) + print(expected) + print("-" * 80) + print("Produced output:") + print("-" * 80) + print(output) + print("-" * 80) + raise RuntimeError("HDF5 install check failed") + shutil.rmtree(checkdir) + + def test_version(self): + """Perform version checks on selected installed package binaries.""" + expected = f"Version {self._output_version}" + + exes = [ + "h5copy", + "h5diff", + "h5dump", + "h5format_convert", + "h5ls", + "h5mkgrp", + "h5repack", + "h5stat", + "h5unjam", + ] + use_short_opt = ["h52gif", "h5repart", "h5unjam"] + for exe in exes: + reason = f"ensure version of {exe} is {self._output_version}" + option = "-V" if exe in use_short_opt else "--version" + with test_part(self, f"test_version_{exe}", purpose=reason): + path = join_path(self.prefix.bin, exe) + if not os.path.isfile(path): + raise SkipTest(f"{path} is not installed") + + prog = which(path) + output = prog(option, output=str.split, error=str.split) + assert expected in output + + def test_example(self): + """copy, dump, and diff example hdf5 file""" + test_data_dir = self.test_suite.current_test_data_dir + with working_dir(test_data_dir, create=True): + filename = "spack.h5" + h5dump = which(self.prefix.bin.h5dump) + out = h5dump(filename, output=str.split, error=str.split) + expected = get_escaped_text_output("dump.out") + check_outputs(expected, out) + + h5copy = which(self.prefix.bin.h5copy) + copyname = "test.h5" + options = ["-i", filename, "-s", "Spack", "-o", copyname, "-d", "Spack"] + h5copy(*options) + + h5diff = which(self.prefix.bin.h5diff) + h5diff(filename, copyname) diff --git a/scripts/uberenv_configs/packages/hdf5/pre-c99-comments.patch b/scripts/uberenv_configs/packages/hdf5/pre-c99-comments.patch new file mode 100644 index 000000000..97743e8fa --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/pre-c99-comments.patch @@ -0,0 +1,43 @@ +diff --git a/test/th5s.c
b/test/th5s.c +index 462bc36..8e18fad 100644 +--- a/test/th5s.c ++++ b/test/th5s.c +@@ -730,8 +730,8 @@ test_h5s_zero_dim(void) + ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims); + CHECK(ret, FAIL, "H5Pset_chunk"); + +- // ret = H5Pset_alloc_time(plist_id, alloc_time); +- // CHECK(ret, FAIL, "H5Pset_alloc_time"); ++ /* ret = H5Pset_alloc_time(plist_id, alloc_time); */ ++ /* CHECK(ret, FAIL, "H5Pset_alloc_time"); */ + + dset1 = H5Dcreate2(fid1, BASICDATASET1, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT); + CHECK(dset1, FAIL, "H5Dcreate2"); +diff --git a/tools/h5dump/h5dump_ddl.c b/tools/h5dump/h5dump_ddl.c +index ee6de5e..3ed6045 100644 +--- a/tools/h5dump/h5dump_ddl.c ++++ b/tools/h5dump/h5dump_ddl.c +@@ -1341,8 +1341,8 @@ handle_attributes(hid_t fid, const char *attr, void UNUSED * data, int UNUSED pe + string_dataformat.do_escape = display_escape; + outputformat = &string_dataformat; + +- //attr_name = attr + j + 1; +- // need to replace escape characters ++ /* attr_name = attr + j + 1; */ ++ /* need to replace escape characters */ + attr_name = h5tools_str_replace(attr + j + 1, "\\/", "/"); + + +diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c +index 9ce3524..3b4e5e7 100644 +--- a/tools/lib/h5tools_str.c ++++ b/tools/lib/h5tools_str.c +@@ -632,7 +632,7 @@ h5tools_str_indent(h5tools_str_t *str, const h5tool_format_t *info, + h5tools_str_append(str, "%s", OPT(info->line_indent, "")); + } + +-// ctx->need_prefix = 0; ++/* ctx->need_prefix = 0; */ + } + + /*------------------------------------------------------------------------- diff --git a/scripts/uberenv_configs/packages/hdf5/test/dump.out b/scripts/uberenv_configs/packages/hdf5/test/dump.out new file mode 100644 index 000000000..58decefc1 --- /dev/null +++ b/scripts/uberenv_configs/packages/hdf5/test/dump.out @@ -0,0 +1,45 @@ +HDF5 "spack.h5" { +GROUP "/" { + GROUP "Spack" { + GROUP "Software" { + ATTRIBUTE "Distribution" { + DATATYPE H5T_STRING { + STRSIZE H5T_VARIABLE; + STRPAD H5T_STR_NULLTERM; + CSET H5T_CSET_UTF8; + CTYPE H5T_C_S1; + } + DATASPACE SCALAR + DATA { + (0): "Open Source" + } + } + DATASET "data" { + DATATYPE H5T_IEEE_F64LE + DATASPACE SIMPLE { ( 7, 11 ) / ( 7, 11 ) } + DATA { + (0,0): 0.371141, 0.508482, 0.585975, 0.0944911, 0.684849, + (0,5): 0.580396, 0.720271, 0.693561, 0.340432, 0.217145, + (0,10): 0.636083, + (1,0): 0.686996, 0.773501, 0.656767, 0.617543, 0.226132, + (1,5): 0.768632, 0.0548711, 0.54572, 0.355544, 0.591548, + (1,10): 0.233007, + (2,0): 0.230032, 0.192087, 0.293845, 0.0369338, 0.038727, + (2,5): 0.0977931, 0.966522, 0.0821391, 0.857921, 0.495703, + (2,10): 0.746006, + (3,0): 0.598494, 0.990266, 0.993009, 0.187481, 0.746391, + (3,5): 0.140095, 0.122661, 0.929242, 0.542415, 0.802758, + (3,10): 0.757941, + (4,0): 0.372124, 0.411982, 0.270479, 0.950033, 0.329948, + (4,5): 0.936704, 0.105097, 0.742285, 0.556565, 0.18988, 0.72797, + (5,0): 0.801669, 0.271807, 0.910649, 0.186251, 0.868865, + (5,5): 0.191484, 0.788371, 0.920173, 0.582249, 0.682022, + (5,10): 0.146883, + (6,0): 0.826824, 0.0886705, 0.402606, 0.0532444, 0.72509, + (6,5): 0.964683, 0.330362, 0.833284, 0.630456, 0.411489, 0.247806 + } + } + } + } +} +} diff --git a/scripts/uberenv_configs/packages/hdf5/test/spack.h5 b/scripts/uberenv_configs/packages/hdf5/test/spack.h5 new file mode 100644 index 000000000..c2f3a6f39 Binary files /dev/null and b/scripts/uberenv_configs/packages/hdf5/test/spack.h5 differ diff --git a/scripts/uberenv_configs/packages/silo/48-configure-f77.patch 
b/scripts/uberenv_configs/packages/silo/48-configure-f77.patch new file mode 100644 index 000000000..2af6fae7f --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/48-configure-f77.patch @@ -0,0 +1,12 @@ +diff --git a/configure.ac b/configure.ac +index 94e2a8a..fd7fbad 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -578,6 +578,7 @@ AC_PROG_CC + AC_PROG_CPP + if test -n "$FORTRAN"; then + AC_PROG_FC ++ AC_PROG_F77 + AC_FC_LIBRARY_LDFLAGS + AC_FC_WRAPPERS + fi diff --git a/scripts/uberenv_configs/packages/silo/H5EPR_SEMI_COLON.patch b/scripts/uberenv_configs/packages/silo/H5EPR_SEMI_COLON.patch new file mode 100644 index 000000000..023f0bbe6 --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/H5EPR_SEMI_COLON.patch @@ -0,0 +1,46 @@ +diff --git a/src/hdf5_drv/H5FDsilo.c b/src/hdf5_drv/H5FDsilo.c +index 840dfd0..0153e18 100644 +--- a/src/hdf5_drv/H5FDsilo.c ++++ b/src/hdf5_drv/H5FDsilo.c +@@ -255,13 +255,13 @@ static const char *flavors(H5F_mem_t m) + snprintf(msg, sizeof(msg), Msg "(errno=%d, \"%s\")", \ + Errno, strerror(Errno)); \ + ret_value = Ret; \ +- H5Epush_ret(Func, Cls, Maj, Min, msg, Ret) \ ++ H5Epush_ret(Func, Cls, Maj, Min, msg, Ret) ; \ + } + #else + #define H5E_PUSH_HELPER(Func,Cls,Maj,Min,Msg,Ret,Errno) \ + { \ + ret_value = Ret; \ +- H5Epush_ret(Func, Cls, Maj, Min, Msg, Ret) \ ++ H5Epush_ret(Func, Cls, Maj, Min, Msg, Ret) ; \ + } + #endif + +@@ -1308,7 +1308,7 @@ H5FD_silo_sb_encode(H5FD_t *_file, char *name/*out*/, + assert(sizeof(hsize_t)<=8); + memcpy(p, &file->block_size, sizeof(hsize_t)); + if (H5Tconvert(H5T_NATIVE_HSIZE, H5T_STD_U64LE, 1, buf+8, NULL, H5P_DEFAULT)<0) +- H5Epush_ret(func, H5E_ERR_CLS, H5E_DATATYPE, H5E_CANTCONVERT, "can't convert superblock info", -1) ++ H5Epush_ret(func, H5E_ERR_CLS, H5E_DATATYPE, H5E_CANTCONVERT, "can't convert superblock info", -1) ; + + return 0; + } +@@ -1336,14 +1336,14 @@ H5FD_silo_sb_decode(H5FD_t *_file, const char *name, const unsigned char *buf) + + /* Make sure the name/version number is correct */ + if (strcmp(name, "LLNLsilo")) +- H5Epush_ret(func, H5E_ERR_CLS, H5E_FILE, H5E_BADVALUE, "invalid silo superblock", -1) ++ H5Epush_ret(func, H5E_ERR_CLS, H5E_FILE, H5E_BADVALUE, "invalid silo superblock", -1) ; + + buf += 8; + /* Decode block size */ + assert(sizeof(hsize_t)<=8); + memcpy(x, buf, 8); + if (H5Tconvert(H5T_STD_U64LE, H5T_NATIVE_HSIZE, 1, x, NULL, H5P_DEFAULT)<0) +- H5Epush_ret(func, H5E_ERR_CLS, H5E_DATATYPE, H5E_CANTCONVERT, "can't convert superblock info", -1) ++ H5Epush_ret(func, H5E_ERR_CLS, H5E_DATATYPE, H5E_CANTCONVERT, "can't convert superblock info", -1) ; + ap = (hsize_t*)x; + /*file->block_size = *ap; ignore stored value for now */ + diff --git a/scripts/uberenv_configs/packages/silo/H5FD_class_t-terminate.patch b/scripts/uberenv_configs/packages/silo/H5FD_class_t-terminate.patch new file mode 100644 index 000000000..8f88e2aea --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/H5FD_class_t-terminate.patch @@ -0,0 +1,14 @@ +diff --git a/src/hdf5_drv/H5FDsilo.c b/src/hdf5_drv/H5FDsilo.c +index 840dfd0..0d09147 100644 +--- a/src/hdf5_drv/H5FDsilo.c ++++ b/src/hdf5_drv/H5FDsilo.c +@@ -500,6 +500,9 @@ static const H5FD_class_t H5FD_silo_g = { + "silo", /*name */ + MAXADDR, /*maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ ++#if HDF5_VERSION_GE(1,10,0) ++ 0, /* terminate */ ++#endif + H5FD_silo_sb_size, /*sb_size */ + H5FD_silo_sb_encode, /*sb_encode */ + H5FD_silo_sb_decode, /*sb_decode */ diff --git a/scripts/uberenv_configs/packages/silo/configure-AX_CHECK_COMPILE_FLAG.patch 
b/scripts/uberenv_configs/packages/silo/configure-AX_CHECK_COMPILE_FLAG.patch new file mode 100644 index 000000000..9f6f4f22f --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/configure-AX_CHECK_COMPILE_FLAG.patch @@ -0,0 +1,19 @@ +diff --git a/configure.ac b/configure.ac +index 94e2a8a..3449233 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -762,10 +763,10 @@ dnl + # it is an argument to the -D argument. So, I think this is + # just totally bogus! + # Default to large file support +-AX_CHECK_COMPILER_FLAGS("-D_LARGEFILE_SOURCE",CFLAGS="$CFLAGS -D_LARGEFILE_SOURCE";) +-AX_CHECK_COMPILER_FLAGS("-D_LARGEFILE64_SOURCE",CFLAGS="$CFLAGS -D_LARGEFILE64_SOURCE";) +-AX_CHECK_COMPILER_FLAGS("-D_FILE_OFFSET_BITS=64",CFLAGS="$CFLAGS -D_FILE_OFFSET_BITS=64";) +-AX_CHECK_COMPILER_FLAGS("-Wdeclaration-after-statement",CFLAGS="$CFLAGS -Wdeclaration-after-statement";) ++AX_CHECK_COMPILE_FLAG("-D_LARGEFILE_SOURCE",CFLAGS="$CFLAGS -D_LARGEFILE_SOURCE";) ++AX_CHECK_COMPILE_FLAG("-D_LARGEFILE64_SOURCE",CFLAGS="$CFLAGS -D_LARGEFILE64_SOURCE";) ++AX_CHECK_COMPILE_FLAG("-D_FILE_OFFSET_BITS=64",CFLAGS="$CFLAGS -D_FILE_OFFSET_BITS=64";) ++AX_CHECK_COMPILE_FLAG("-Wdeclaration-after-statement",CFLAGS="$CFLAGS -Wdeclaration-after-statement";) + + # + # Note: regardless of what the stuff above regarding large file support diff --git a/scripts/uberenv_configs/packages/silo/hdf5-113.patch b/scripts/uberenv_configs/packages/silo/hdf5-113.patch new file mode 100644 index 000000000..b71bd8526 --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/hdf5-113.patch @@ -0,0 +1,40 @@ +From 5dc160c7ae489b8181874dccf7ce3b8089c128f5 Mon Sep 17 00:00:00 2001 +From: Chuck Atkins +Date: Tue, 24 May 2022 10:57:29 -0400 +Subject: [PATCH] hdf5: Support the API changes in >= 1.13 + +--- + src/hdf5_drv/H5FDsilo.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +diff --git a/src/hdf5_drv/H5FDsilo.c b/src/hdf5_drv/H5FDsilo.c +index f454045..74594c1 100644 +--- a/src/hdf5_drv/H5FDsilo.c ++++ b/src/hdf5_drv/H5FDsilo.c +@@ -550,6 +550,12 @@ typedef struct H5FD_class_t { + #endif + + static const H5FD_class_t H5FD_silo_g = { ++#if HDF5_VERSION_GE(1,13,2) && defined(H5FD_CLASS_VERSION) ++ H5FD_CLASS_VERSION, /*version */ ++#endif ++#if HDF5_VERSION_GE(1,13,0) ++ 512, /*value */ ++#endif + "silo", /*name */ + MAXADDR, /*maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ +@@ -583,6 +589,10 @@ static const H5FD_class_t H5FD_silo_g = { + H5FD_silo_truncate, /*truncate */ + NULL, /*lock */ + NULL, /*unlock */ ++#if HDF5_VERSION_GE(1,13,0) ++ NULL, /*del */ ++ NULL, /*ctl */ ++#endif + H5FD_FLMAP_SINGLE /*fl_map */ + }; + +-- +2.36.1 + diff --git a/scripts/uberenv_configs/packages/silo/mkinc-usr-bin-env-perl.patch b/scripts/uberenv_configs/packages/silo/mkinc-usr-bin-env-perl.patch new file mode 100644 index 000000000..13f233010 --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/mkinc-usr-bin-env-perl.patch @@ -0,0 +1,23 @@ +diff --git a/config/mkinc b/config/mkinc +index e52bb64..6d96176 100755 +--- a/config/mkinc ++++ b/config/mkinc +@@ -17,7 +17,7 @@ exec perl /tmp/visitperl$$ $0 ${1+"$@"} + unlink $0; + $0 = shift @ARGV; + +-#!/usr/bin/perl ++#!/usr/bin/env perl + ######################################################################### + # Copyright (C) 1994-2016 Lawrence Livermore National Security, LLC. + # LLNL-CODE-425250. 
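[Reviewer note, not part of the patch] The H5EPR_SEMI_COLON.patch above is easy to misread: every hunk only appends an explicit ';' after H5Epush_ret(...). Newer HDF5 releases appear to define H5Epush_ret so that its expansion is no longer a self-terminating brace block (e.g. the common do { ... } while (0) idiom), so each call site must now supply its own semicolon. A minimal standalone C++ sketch of the failure mode; DEMO_PUSH_RET and demo_errors are illustrative names, not HDF5 symbols:

    // A do { ... } while (0) macro is only a complete statement once the
    // caller appends ';'; the older bare brace-block style did not need one.
    static int demo_errors = 0;

    #define DEMO_PUSH_RET(Ret) do { demo_errors++; return (Ret); } while (0)

    int demo(int bad)
    {
        if (bad)
            DEMO_PUSH_RET(-1);  // without this ';' the do/while does not parse
        demo_errors = 0;
        return demo_errors;
    }
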
+diff --git a/config/mklite b/config/mklite +index f9394a9..274c867 100755 +--- a/config/mklite ++++ b/config/mklite +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # + # Tool to generate lite_pdb.h file from pdb.h and score.h headers. diff --git a/scripts/uberenv_configs/packages/silo/package.py b/scripts/uberenv_configs/packages/silo/package.py new file mode 100644 index 000000000..17cd15bb5 --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/package.py @@ -0,0 +1,258 @@ +# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +# Spack Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack.package import * +from spack.util.environment import is_system_path + + +class Silo(AutotoolsPackage): + """Silo is a library for reading and writing a wide variety of scientific + data to binary, disk files.""" + + homepage = "https://wci.llnl.gov/simulation/computer-codes/silo" + git = "https://github.com/LLNL/Silo.git" + url = "https://wci.llnl.gov/sites/wci/files/2021-01/silo-4.10.2.tgz" + maintainers("patrickb314") + + # note: 4.12 cmake build system is default, but we need a bigger spack + # recipe update to leverage that + version( + "4.12.0", + preferred=True, + sha256="bde1685e4547d5dd7416bd6215b41f837efef0e4934d938ba776957afbebdff0", + url="https://github.com/LLNL/Silo/releases/download/4.12.0/Silo-4.12.0.tar.xz", + ) + version( + "4.11.1", + sha256="49eddc00304aa4a19074b099559edbdcaa3532c98df32f99aa62b9ec3ea7cee2", + url="https://github.com/LLNL/Silo/releases/download/4.11.1/silo-4.11.1.tar.xz", + ) + version( + "4.11.1-bsd", + sha256="51ccfdf3c09dfc98c7858a0a6f08cc3b2a07ee3c4142ee6482ba7b24e314c2aa", + url="https://github.com/LLNL/Silo/releases/download/4.11.1/silo-4.11.1-bsd.tar.xz", + ) + version( + "4.11", + sha256="ab936c1f4fc158d9fdc4415965f7d9def7f4abeca596fe5a25bd8485654898ac", + url="https://github.com/LLNL/Silo/releases/download/v4.11/silo-4.11.tar.gz", + ) + version( + "4.11-bsd", + sha256="6d0a85a079d48fcdcc0084ecb5fc4cfdcc64852edee780c60cb244d16f4bc4ec", + url="https://github.com/LLNL/Silo/releases/download/v4.11/silo-4.11-bsd.tar.gz", + ) + version( + "4.10.2", + sha256="3af87e5f0608a69849c00eb7c73b11f8422fa36903dd14610584506e7f68e638", + preferred=True, + ) + version( + "4.10.2-bsd", + sha256="4b901dfc1eb4656e83419a6fde15a2f6c6a31df84edfad7f1dc296e01b20140e", + url="https://wci.llnl.gov/sites/wci/files/2021-01/silo-4.10.2-bsd.tgz", + ) + version("4.9", sha256="90f3d069963d859c142809cfcb034bc83eb951f61ac02ccb967fc8e8d0409854") + version("4.8", sha256="c430c1d33fcb9bc136a99ad473d535d6763bd1357b704a915ba7b1081d58fb21") + + depends_on("c", type="build") # generated + depends_on("cxx", type="build") # generated + depends_on("fortran", type="build") # generated + + variant("python", default=True, description="Enable Python support") + variant("fortran", default=True, description="Enable Fortran support") + variant("shared", default=True, description="Build shared libraries") + variant("silex", default=False, description="Builds Silex, a GUI for viewing Silo files") + variant("pic", default=True, description="Produce position-independent code (for shared libs)") + variant("mpi", default=True, description="Compile with MPI Compatibility") + variant("hdf5", default=True, description="Support HDF5 for database I/O") + variant("hzip", default=True, description="Enable hzip support") + variant("fpzip", default=True, description="Enable fpzip support") + + depends_on("perl", 
type="build") + depends_on("m4", type="build", when="+shared") + depends_on("autoconf", type="build", when="+shared") + depends_on("autoconf-archive", type="build", when="+shared") + depends_on("automake", type="build", when="+shared") + depends_on("libtool", type="build", when="+shared") + depends_on("mpi", when="+mpi") + depends_on("hdf5@1.8:1.10", when="@:4.10+hdf5") + depends_on("hdf5@1.12:", when="@4.11:+hdf5") + depends_on("qt+gui~framework@4.8:4.9", when="+silex") + depends_on("libx11", when="+silex") + # Xmu dependency is required on Ubuntu 18-20 + depends_on("libxmu", when="+silex") + depends_on("readline") + depends_on("zlib-api") + + patch("remove-mpiposix.patch", when="@4.8:4.10.2") + + # hdf5 1.10 added an additional field to the H5FD_class_t struct + patch("H5FD_class_t-terminate.patch", when="@:4.10.2-bsd") + + # H5EPR_SEMI_COLON.patch was fixed in current dev + patch("H5EPR_SEMI_COLON.patch", when="@:4.11-bsd") + + # Fix missing F77 init, fixed in 4.9 + patch("48-configure-f77.patch", when="@:4.8") + + # The previously used AX_CHECK_COMPILER_FLAGS macro was dropped from + # autoconf-archive in 2011 + patch("configure-AX_CHECK_COMPILE_FLAG.patch", when="@:4.11-bsd") + + # API changes in hdf5-1.13 cause breakage + # See https://github.com/LLNL/Silo/pull/260 + patch("hdf5-113.patch", when="@4.11:4.11-bsd +hdf5 ^hdf5@1.13:") + conflicts("^hdf5@1.13:", when="@:4.10.2-bsd") + + # hzip and fpzip are not available in the BSD releases + conflicts("+hzip", when="@4.10.2-bsd,4.11-bsd") + conflicts("+fpzip", when="@4.10.2-bsd,4.11-bsd") + + # zfp include missing + patch("zfp_error.patch", when="@4.11:4.11-bsd +hdf5") + + # use /usr/bin/env perl for portability + patch("mkinc-usr-bin-env-perl.patch", when="@:4.11-bsd") + + def flag_handler(self, name, flags): + spec = self.spec + if name == "ldflags": + if "+hdf5" in spec: + if spec["hdf5"].satisfies("~shared"): + flags.append("-ldl") + + if "+pic" in spec: + if name == "cflags": + flags.append(self.compiler.cc_pic_flag) + elif name == "cxxflags": + flags.append(self.compiler.cxx_pic_flag) + elif name == "fcflags": + flags.append(self.compiler.fc_pic_flag) + if name == "cflags" or name == "cxxflags": + if spec.satisfies("%oneapi"): + flags.append("-Wno-error=int") + flags.append("-Wno-error=int-conversion") + if "+hdf5" in spec: + # @:4.10 can use up to the 1.10 API + if "@:4.10" in spec: + if "@1.10:" in spec["hdf5"]: + flags.append("-DH5_USE_110_API") + elif "@1.8:" in spec["hdf5"]: + # Just in case anytone is trying to force the 1.6 api for + # some reason + flags.append("-DH5_USE_18_API") + else: + # @4.11: can use newer HDF5 APIs, so this ensures silo is + # presented with an HDF5 API consistent with the HDF5 version. + # Use the latest even-numbered API version, i.e. 
v1.13.1 uses + # API v1.12 + + # hdf5 support branches have a `develop` prefix + if "develop" in str(spec["hdf5"].version): + maj_ver = int(spec["hdf5"].version[1]) + min_ver = int(spec["hdf5"].version[2]) + else: + maj_ver = int(spec["hdf5"].version[0]) + min_ver = int(spec["hdf5"].version[1]) + min_apiver = int(min_ver / 2) * 2 + flags.append("-DH5_USE_{0}{1}_API".format(maj_ver, min_apiver)) + + if spec.satisfies("%clang") or spec.satisfies("%apple-clang"): + flags.append("-Wno-implicit-function-declaration") + return (flags, None, None) + + @when("%clang@9:") + def patch(self): + self.clang_9_patch() + + @when("%apple-clang@11.0.3:") + def patch(self): + self.clang_9_patch() + + def clang_9_patch(self): + # Clang 9 and later include macro definitions in <math.h> that conflict + # with typedefs DOMAIN and RANGE used in Silo plugins. + # It looks like the upstream fpzip repo has been fixed, but that change + # hasn't yet made it into silo. + # https://github.com/LLNL/fpzip/blob/master/src/pcmap.h + + if str(self.spec.version).endswith("-bsd"): + # The files below don't exist in the BSD licenced version + return + + def repl(match): + # Change macro-like uppercase to title-case. + return match.group(1).title() + + files_to_filter = [ + "src/fpzip/codec.h", + "src/fpzip/pcdecoder.inl", + "src/fpzip/pcencoder.inl", + "src/fpzip/pcmap.h", + "src/fpzip/pcmap.inl", + "src/fpzip/read.cpp", + "src/fpzip/write.cpp", + "src/hzip/hzmap.h", + "src/hzip/hzresidual.h", + ] + + filter_file(r"\b(DOMAIN|RANGE|UNION)\b", repl, *files_to_filter) + + @property + def force_autoreconf(self): + # Update autoconf's tests whether libtool supports shared libraries. + # (Otherwise, shared libraries are always disabled on Darwin.) + if self.spec.satisfies("@4.11-bsd") or self.spec.satisfies("@4.10.2-bsd"): + return False + else: + return self.spec.satisfies("+shared") + + def configure_args(self): + spec = self.spec + config_args = ["--enable-install-lite-headers"] + + config_args.extend(self.enable_or_disable("pythonmodule", variant="python")) + config_args.extend(self.enable_or_disable("fortran")) + config_args.extend(self.enable_or_disable("silex")) + config_args.extend(self.enable_or_disable("shared")) + config_args.extend(self.enable_or_disable("hzip")) + config_args.extend(self.enable_or_disable("fpzip")) + + # Do not specify the prefix of zlib if it is in a system directory + # (see https://github.com/spack/spack/pull/21900).
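[Reviewer note, not part of the patch] The flag_handler logic above pins Silo to the newest even-numbered HDF5 compatibility API at or below the linked HDF5 version, since HDF5 only ships H5_USE_XY_API defines for even minor releases. A minimal C++ sketch of the same arithmetic; print_api_flag is an illustrative name, not part of the package:

    #include <cstdio>

    // Round the minor version down to the nearest even release and emit the
    // matching compatibility define, mirroring package.py's
    // min_apiver = int(min_ver / 2) * 2.
    void print_api_flag(int maj_ver, int min_ver)
    {
        int min_apiver = (min_ver / 2) * 2;
        std::printf("-DH5_USE_%d%d_API\n", maj_ver, min_apiver);
    }

    int main()
    {
        print_api_flag(1, 13);  // -DH5_USE_112_API (hdf5 1.13 speaks the 1.12 API)
        print_api_flag(1, 10);  // -DH5_USE_110_API
        return 0;
    }
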
+ zlib_prefix = self.spec["zlib-api"].prefix + if is_system_path(zlib_prefix): + config_args.append("--with-zlib=yes") + else: + config_args.append("--with-zlib=%s,%s" % (zlib_prefix.include, zlib_prefix.lib)) + + if "+hdf5" in spec: + config_args.append( + "--with-hdf5=%s,%s" % (spec["hdf5"].prefix.include, spec["hdf5"].prefix.lib) + ) + + if "+silex" in spec: + x = spec["libx11"] + config_args.extend( + [ + "--with-Qt-dir=" + spec["qt"].prefix, + "--with-Qt-lib=QtGui -lQtCore", + "--x-includes=" + x.prefix.include, + "--x-libraries=" + x.prefix.lib, + ] + ) + + if "+mpi" in spec: + config_args.append("CC=%s" % spec["mpi"].mpicc) + config_args.append("CXX=%s" % spec["mpi"].mpicxx) + config_args.append("FC=%s" % spec["mpi"].mpifc) + + return config_args + + @property + def libs(self): + shared = "+shared" in self.spec + return find_libraries("libsilo*", root=self.prefix, shared=shared, recursive=True) diff --git a/scripts/uberenv_configs/packages/silo/remove-mpiposix.patch b/scripts/uberenv_configs/packages/silo/remove-mpiposix.patch new file mode 100644 index 000000000..1c6449f0c --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/remove-mpiposix.patch @@ -0,0 +1,40 @@ +Index: silo-llnl-4.9.1/src/hdf5_drv/silo_hdf5.c +=================================================================== +--- silo-llnl-4.9.1.orig/src/hdf5_drv/silo_hdf5.c ++++ silo-llnl-4.9.1/src/hdf5_drv/silo_hdf5.c +@@ -4717,16 +4717,7 @@ db_hdf5_process_file_options(opts_set_id + + /* default HDF5 mpi drivers */ + case DB_FILE_OPTS_H5_DEFAULT_MPIP: +- { +-#ifdef H5_HAVE_PARALLEL +- h5status |= H5Pset_fapl_mpiposix(retval, MPI_COMM_SELF, TRUE); +-#else +- H5Pclose(retval); +- return db_perror("HDF5 MPI VFD", E_NOTENABLEDINBUILD, me); +-#endif +- break; +- } +- ++ /* FALLTHROUGH */ + case DB_FILE_OPTS_H5_DEFAULT_MPIO: + { + #ifdef H5_HAVE_PARALLEL +@@ -4963,15 +4954,8 @@ db_hdf5_process_file_options(opts_set_id + if (p = DBGetOption(opts, DBOPT_H5_MPIP_NO_GPFS_HINTS)) + use_gpfs_hints = FALSE; + +- if (vfd == DB_H5VFD_MPIO) +- { +- h5status |= H5Pset_fapl_mpio(retval, mpi_comm, mpi_info); +- if (created_info) MPI_Info_free(&mpi_info); +- } +- else +- { +- h5status |= H5Pset_fapl_mpiposix(retval, mpi_comm, use_gpfs_hints); +- } ++ h5status |= H5Pset_fapl_mpio(retval, mpi_comm, mpi_info); ++ if (created_info) MPI_Info_free(&mpi_info); + #else + H5Pclose(retval); + return db_perror("HDF5 MPI VFD", E_NOTENABLEDINBUILD, me); diff --git a/scripts/uberenv_configs/packages/silo/zfp_error.patch b/scripts/uberenv_configs/packages/silo/zfp_error.patch new file mode 100644 index 000000000..eec4282a0 --- /dev/null +++ b/scripts/uberenv_configs/packages/silo/zfp_error.patch @@ -0,0 +1,11 @@ +diff -ru silo/src/hdf5_drv/silo_hdf5.c silo.fixed/src/hdf5_drv/silo_hdf5.c +--- silo/src/hdf5_drv/silo_hdf5.c 2021-09-09 12:35:00.000000000 -0700 ++++ silo.fixed/src/hdf5_drv/silo_hdf5.c 2022-12-02 10:34:34.560531000 -0800 +@@ -198,6 +198,7 @@ + #endif + #ifdef HAVE_ZFP + #include "H5Zzfp.h" ++extern void zfp_init_zfp(); + #endif + + /* Defining these to check overhead of PROTECT */ diff --git a/src/libs/conduit/conduit_data_array.cpp b/src/libs/conduit/conduit_data_array.cpp index ababf487b..df97f8ca8 100644 --- a/src/libs/conduit/conduit_data_array.cpp +++ b/src/libs/conduit/conduit_data_array.cpp @@ -2231,7 +2231,7 @@ template class DataArray; #endif #ifdef CONDUIT_USE_LONG_DOUBLE - ltemplate class DataArray; +template class DataArray; #endif } diff --git a/src/libs/relay/conduit_relay_io_hdf5.cpp b/src/libs/relay/conduit_relay_io_hdf5.cpp 
index d0815f28a..0e6f6b37d 100644 --- a/src/libs/relay/conduit_relay_io_hdf5.cpp +++ b/src/libs/relay/conduit_relay_io_hdf5.cpp @@ -542,6 +542,213 @@ class HDF5ErrorStackSuppressor bool active; }; + +//----------------------------------------------------------------------------- +// Private class used to manage and automatically clean up HDF5 handles. +// +// The base template is parameterized on structs that implement close using +// the HDF5 API to cover HDF5 O/F/G/D/S/T/P/A types. +// +//----------------------------------------------------------------------------- +template <class Handler> +class RelayH5Handle +{ +public: + // construct with parent id and ref path + RelayH5Handle(hid_t handle_id, + hid_t parent_id, + const std::string &ref_path) + : m_id(handle_id), + m_parent_id(parent_id), + m_ref_path(ref_path) + { + //// for debugging + // std::cout << "Construct Handle[" << Handler::name << "] " + // << m_ref_path << " id: " << m_id << std::endl; + } + + // construct with ref path + RelayH5Handle(hid_t handle_id, + const std::string &ref_path) + : m_id(handle_id), + m_parent_id(-1), + m_ref_path(ref_path) + { + //// for debugging + // std::cout << "Construct Handle[" << Handler::name << "] " + // << m_ref_path << " id: " << m_id << std::endl; + } + + // destruct, close handle if needed + ~RelayH5Handle() + { + close(); + } + + // fetch hdf5 handle id + hid_t id() const + { + return m_id; + } + + // check if handle id is valid + bool is_valid() const + { + return m_id > 0; + } + + // set new handle id, does not close + void set_id(hid_t id) + { + m_id = id; + //// for debugging + // std::cout << "Set Handle[" << Handler::name << "] " + // << m_ref_path << " id: " << m_id << std::endl; + } + + // helper to check if id was created correctly + void check_created() + { + if(!is_valid()) + { + if(m_parent_id != -1) + { + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(m_id, + m_parent_id, + m_ref_path, + "Failed to create HDF5 " << Handler::name); + } + else + { + CONDUIT_CHECK_HDF5_ERROR_WITH_REF_PATH(m_id, + m_ref_path, + "Failed to create HDF5 " << Handler::name); + } + + m_id = -1; + } + } + + // helper to close handle + void close() + { + if(is_valid()) + { + //// for debugging + // std::cout << "Close[" << Handler::name << "] " + // << m_ref_path << " id: " << m_id << std::endl; + herr_t res = Handler::close_handle(m_id); + if(m_parent_id != -1) + { + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(res, + m_parent_id, + m_ref_path, + "Failed to close HDF5 " << Handler::name << " : " << m_id); + } + else + { + CONDUIT_CHECK_HDF5_ERROR_WITH_REF_PATH(res, + m_ref_path, + "Failed to close HDF5 " << Handler::name << " : " << m_id); + } + m_id = -1; + } + } + +private: + hid_t m_id; + hid_t m_parent_id; + const std::string &m_ref_path; // TODO: c++17 string_view +}; + +//-----------------------------------------------------------------// +// helper structs +//-----------------------------------------------------------------// + +//-----------------------------------------------------------------// +// H5O +struct RelayH5OHandler +{ + static constexpr const char* name = "Object"; + static herr_t close_handle(hid_t id){return H5Oclose(id);} +}; + +//-----------------------------------------------------------------// +// H5F +//-----------------------------------------------------------------// +struct RelayH5FHandler +{ + static constexpr const char* name = "File"; + static herr_t close_handle(hid_t id){return H5Fclose(id);} +}; + +//-----------------------------------------------------------------// +// H5G
+//-----------------------------------------------------------------// +struct RelayH5GHandler +{ + static constexpr const char* name = "Group"; + static herr_t close_handle(hid_t id){return H5Gclose(id);} +}; + +//-----------------------------------------------------------------// +// H5D +//-----------------------------------------------------------------// +struct RelayH5DHandler +{ + static constexpr const char* name = "Dataset"; + static herr_t close_handle(hid_t id){return H5Dclose(id);} +}; + +//-----------------------------------------------------------------// +// H5S +//-----------------------------------------------------------------// +struct RelayH5SHandler +{ + static constexpr const char* name = "Dataspace"; + static herr_t close_handle(hid_t id){return H5Sclose(id);} +}; + +//-----------------------------------------------------------------// +// H5T +//-----------------------------------------------------------------// +struct RelayH5THandler +{ + static constexpr const char* name = "Datatype"; + static herr_t close_handle(hid_t id){return H5Tclose(id);} +}; + +//-----------------------------------------------------------------// +// H5P +//-----------------------------------------------------------------// +struct RelayH5PHandler +{ + static constexpr const char* name = "Property List"; + static herr_t close_handle(hid_t id){return H5Pclose(id);} +}; + +//-----------------------------------------------------------------// +// H5A +//-----------------------------------------------------------------// +struct RelayH5AHandler +{ + static constexpr const char* name = "Attribute"; + static herr_t close_handle(hid_t id){return H5Aclose(id);} +}; + +//-----------------------------------------------------------------// +// Concrete Handle Classes +//-----------------------------------------------------------------// +typedef RelayH5Handle<RelayH5OHandler> RelayH5OHandle; +typedef RelayH5Handle<RelayH5FHandler> RelayH5FHandle; +typedef RelayH5Handle<RelayH5GHandler> RelayH5GHandle; +typedef RelayH5Handle<RelayH5DHandler> RelayH5DHandle; +typedef RelayH5Handle<RelayH5SHandler> RelayH5SHandle; +typedef RelayH5Handle<RelayH5THandler> RelayH5THandle; +typedef RelayH5Handle<RelayH5PHandler> RelayH5PHandle; +typedef RelayH5Handle<RelayH5AHandler> RelayH5AHandle; + + //----------------------------------------------------------------------------- // helper method decls //----------------------------------------------------------------------------- @@ -715,7 +922,6 @@ fill_dataset_opts(const std::string & ref_path, const Node& inopts, hsize_t* make_dataset_opt_copy(const Node& opts, const std::string opt_name); - //----------------------------------------------------------------------------- // helper used to properly create a new ref_path for a child std::string @@ -851,36 +1057,11 @@ conduit_dtype_to_hdf5_dtype(const DataType &dt, }; } - return res; -} - -//----------------------------------------------------------------------------- -// cleanup conduit created hdf5 dtype -// (effectively a noop, except for the string case) -// TODO: This could be a macro ... ? -//----------------------------------------------------------------------------- -void -conduit_dtype_to_hdf5_dtype_cleanup(hid_t hdf5_dtype_id, - const std::string &ref_path) -{ - // NOTE: This cleanup won't be triggered when we use thee - // based H5T_C_S1 with a data space that encodes # of elements - // (Our current path, given our logic to encode string size in the - // hdf5 type is disabled ) - - // if this is a string using a custom type we need to cleanup - // the conduit_dtype_to_hdf5_dtype result - if( (!
H5Tequal(hdf5_dtype_id, H5T_C_S1) ) && - (H5Tget_class(hdf5_dtype_id) == H5T_STRING ) ) - { - CONDUIT_CHECK_HDF5_ERROR_WITH_REF_PATH(H5Tclose(hdf5_dtype_id), - ref_path, - "Failed to close HDF5 string Type " - << hdf5_dtype_id); - } + // return a copy of the built-in type so that we have the same ref count + // semantics as other objects + return H5Tcopy(res); } - //----------------------------------------------------------------------------- DataType hdf5_dtype_to_conduit_dtype(hid_t hdf5_dtype_id, @@ -1099,7 +1280,6 @@ hdf5_ref_path_with_filename(hid_t hdf5_id, } } - //---------------------------------------------------------------------------// // Write Helpers //---------------------------------------------------------------------------// @@ -1126,9 +1306,11 @@ check_if_conduit_leaf_is_compatible_with_hdf5_obj(const DataType &dtype, ( h5_obj_info.type == H5O_TYPE_DATASET ) ) { // get the hdf5 dataspace for the passed hdf5 obj - hid_t h5_test_dspace = H5Dget_space(hdf5_id); + RelayH5SHandle h5_test_dspace_hnd(H5Dget_space(hdf5_id), + hdf5_id, + ref_path); - if( H5Sget_simple_extent_type(h5_test_dspace) == H5S_NULL ) + if( H5Sget_simple_extent_type(h5_test_dspace_hnd.id()) == H5S_NULL ) { // a dataset with H5S_NULL data space is only compatible with // conduit empty @@ -1149,29 +1331,35 @@ else { // get the hdf5 datatype that matchs the conduit dtype - hid_t h5_dtype = conduit_dtype_to_hdf5_dtype(dtype, - ref_path); + RelayH5THandle h5_dtype_hnd(conduit_dtype_to_hdf5_dtype(dtype, + ref_path), + hdf5_id, + ref_path); + h5_dtype_hnd.check_created(); // get the hdf5 datatype for the passed hdf5 obj - hid_t h5_test_dtype = H5Dget_type(hdf5_id); + RelayH5THandle h5_test_dtype_hnd(H5Dget_type(hdf5_id), + hdf5_id, + ref_path); + h5_test_dtype_hnd.check_created(); // we will check the 1d-properties of the hdf5 dataspace - hssize_t h5_test_num_ele = H5Sget_simple_extent_npoints(h5_test_dspace); + hssize_t h5_test_num_ele = H5Sget_simple_extent_npoints(h5_test_dspace_hnd.id()); hsize_t dataset_max_dims[1]; - H5Sget_simple_extent_dims(h5_test_dspace, NULL, dataset_max_dims); + H5Sget_simple_extent_dims(h5_test_dspace_hnd.id(), NULL, dataset_max_dims); // string case is special, check it first // if the dataset in the file is a custom string type // check the type's size vs the # of elements - if( ( ! H5Tequal(h5_test_dtype, H5T_C_S1) && - ( H5Tget_class(h5_test_dtype) == H5T_STRING ) && - ( H5Tget_class(h5_dtype) == H5T_STRING ) ) && + if( ( ! H5Tequal(h5_test_dtype_hnd.id(), H5T_C_S1) && + ( H5Tget_class(h5_test_dtype_hnd.id()) == H5T_STRING ) && + ( H5Tget_class(h5_dtype_hnd.id()) == H5T_STRING ) ) && // if not shorted out, we have a string w/ custom type // check length to see if compat // note: both hdf5 and conduit dtypes include null term in string size - (dtype.number_of_elements() != (index_t)H5Tget_size(h5_test_dtype) ) ) + (dtype.number_of_elements() != (index_t)H5Tget_size(h5_test_dtype_hnd.id()) ) ) { std::ostringstream oss; oss << "Conduit Node (string leaf) at path '" << ref_path << "'" @@ -1179,13 +1367,13 @@ << " '" << ref_path << "'" << "\nConduit leaf String Node length (" << dtype.number_of_elements() << ")" - << " != HDF5 Dataset size (" << H5Tget_size(h5_test_dtype) << ")"; + << " != HDF5 Dataset size (" << H5Tget_size(h5_test_dtype_hnd.id()) << ")"; incompat_details = oss.str(); res = false; } - else if( !
(H5Tequal(h5_dtype, h5_test_dtype) > 0) ) + else if( ! (H5Tequal(h5_dtype_hnd.id(), h5_test_dtype_hnd.id()) > 0) ) { std::ostringstream oss; @@ -1208,26 +1396,19 @@ check_if_conduit_leaf_is_compatible_with_hdf5_obj(const DataType &dtype, << " '" << ref_path << "'" << "\nConduit leaf Node number of elements (" << dtype.number_of_elements() << " " << h5_test_num_ele << ")" - << " != HDF5 Dataset size (" << H5Tget_size(h5_test_dtype) << ")"; + << " != HDF5 Dataset size (" << H5Tget_size(h5_test_dtype_hnd.id()) << ")"; incompat_details = oss.str(); res = false; } - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Tclose(h5_test_dtype), - hdf5_id, - ref_path, - "Failed to close HDF5 Datatype " - << h5_test_dtype); - // clean up when necessary - conduit_dtype_to_hdf5_dtype_cleanup(h5_dtype); + + // auto cleanup of h5_test_dtype_hnd and h5_dtype_hnd } - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Sclose(h5_test_dspace), - hdf5_id, - ref_path, - "Failed to close HDF5 Dataspace " << h5_test_dspace); + // auto cleanup of h5_test_dspace_hnd + // } else { @@ -1296,25 +1477,22 @@ check_if_conduit_object_is_compatible_with_hdf5_tree(const Node &node, // check if the HDF5 group has child with same name // as the node's child - hid_t h5_child_obj = H5Oopen(hdf5_id, - itr.name().c_str(), - H5P_DEFAULT); + RelayH5OHandle h5_child_obj_hnd(H5Oopen(hdf5_id, + itr.name().c_str(), + H5P_DEFAULT), + ref_path); std::string chld_ref_path = join_ref_paths(ref_path,itr.name()); - if( CONDUIT_HDF5_VALID_ID(h5_child_obj) ) + if( CONDUIT_HDF5_VALID_ID(h5_child_obj_hnd.id()) ) { // if a child does exist, we need to make sure the child is // compatible with the conduit node res = check_if_conduit_node_is_compatible_with_hdf5_tree(child, chld_ref_path, - h5_child_obj, + h5_child_obj_hnd.id(), opts, incompat_details); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Oclose(h5_child_obj), - hdf5_id, - ref_path, - "Failed to close HDF5 Object: " << h5_child_obj); + h5_child_obj_hnd.close(); } // no child exists with this name, we are ok (it can be created // to match) check the next child @@ -1390,30 +1568,27 @@ check_if_conduit_list_is_compatible_with_hdf5_tree(const Node &node, // for a list, we check the group's children by index (not name) - hid_t h5_child_obj = H5Oopen_by_idx(hdf5_id, ".", + RelayH5OHandle h5_child_obj_hnd(H5Oopen_by_idx(hdf5_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, itr.index(), - H5P_DEFAULT); + H5P_DEFAULT), + hdf5_id, + ref_path); std::string chld_ref_path = join_ref_paths(ref_path,itr.name()); - if( CONDUIT_HDF5_VALID_ID(h5_child_obj) ) + if( CONDUIT_HDF5_VALID_ID(h5_child_obj_hnd.id()) ) { // if a child does exist, we need to make sure the child is // compatible with the conduit node res = check_if_conduit_node_is_compatible_with_hdf5_tree(child, chld_ref_path, - h5_child_obj, + h5_child_obj_hnd.id(), opts, incompat_details); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Oclose(h5_child_obj), - hdf5_id, - ref_path, - "Failed to close HDF5 Object: " << h5_child_obj); } - // no child exists with this index, we are ok (it can be created - // to match) + // no child exists with this index, we are ok + // (it can be created to match) } } else // bad id or not a group @@ -1596,12 +1771,18 @@ create_hdf5_dataset_for_conduit_leaf(const DataType &dtype, { hid_t res = -1; - hid_t h5_dtype = conduit_dtype_to_hdf5_dtype(dtype,ref_path); + RelayH5THandle h5_dtype_hnd(conduit_dtype_to_hdf5_dtype(dtype,ref_path), + hdf5_group_id, + ref_path); + h5_dtype_hnd.check_created(); hsize_t num_eles = 
(hsize_t) dtype.number_of_elements(); hid_t h5_cprops_id = H5P_DEFAULT; + RelayH5PHandle h5_cprops_hnd(-1, hdf5_group_id, ref_path); + + bool unlimited_dim = false; if (extendible && !HDF5Options::chunking_enabled) @@ -1616,20 +1797,24 @@ create_hdf5_dataset_for_conduit_leaf(const DataType &dtype, dtype.bytes_compact() <= HDF5Options::compact_storage_threshold) { h5_cprops_id = create_hdf5_compact_plist_for_conduit_leaf(); + // if we create custom plist, connect to handle for auto cleanup + h5_cprops_hnd.set_id(h5_cprops_id); } else if( extendible || (HDF5Options::chunking_enabled && dtype.bytes_compact() > HDF5Options::chunk_threshold)) { h5_cprops_id = create_hdf5_chunked_plist_for_conduit_leaf(dtype); + // if we create custom plist, connect to handle for auto cleanup + h5_cprops_hnd.set_id(h5_cprops_id); unlimited_dim = true; } - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_cprops_id, - hdf5_group_id, - ref_path, + if(h5_cprops_id != H5P_DEFAULT) + { + h5_cprops_hnd.check_created(); + } - "Failed to create HDF5 property list"); - hid_t h5_dspace_id = -1; + RelayH5SHandle h5_dspace_hnd(-1, hdf5_group_id, ref_path); // string a scalar with size embedded in type is disabled // b/c this path undermines compression @@ -1647,29 +1832,25 @@ create_hdf5_dataset_for_conduit_leaf(const DataType &dtype, if (unlimited_dim) { hsize_t unlimited_dims[1] = {H5S_UNLIMITED}; - h5_dspace_id = H5Screate_simple(1, - &num_eles, - unlimited_dims); + h5_dspace_hnd.set_id(H5Screate_simple(1, + &num_eles, + unlimited_dims)); } else { // TODO: add ndarray heuristics - h5_dspace_id = H5Screate_simple(1, - &num_eles, - NULL); + h5_dspace_hnd.set_id(H5Screate_simple(1, + &num_eles, + NULL)); } - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dspace_id, - hdf5_group_id, - ref_path, - "Failed to create HDF5 Dataspace"); + h5_dspace_hnd.check_created(); // create new dataset res = H5Dcreate(hdf5_group_id, hdf5_dset_name.c_str(), - h5_dtype, - h5_dspace_id, + h5_dtype_hnd.id(), + h5_dspace_hnd.id(), H5P_DEFAULT, h5_cprops_id, H5P_DEFAULT); @@ -1681,28 +1862,8 @@ create_hdf5_dataset_for_conduit_leaf(const DataType &dtype, << hdf5_group_id << " " << hdf5_dset_name); - // cleanup if custom data type was used - conduit_dtype_to_hdf5_dtype_cleanup(h5_dtype); - - // close plist used for compression - if(h5_cprops_id != H5P_DEFAULT) - { - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Pclose(h5_cprops_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 compression " - "property list " - << h5_cprops_id); - } - - // close our dataspace - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Sclose(h5_dspace_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Dataspace " - << h5_dspace_id); - + // auto cleanup of h5_dtype_hnd, h5_dspace_hnd, and h5_cprops_hnd (if used) return res; } @@ -1715,20 +1876,24 @@ create_hdf5_dataset_for_conduit_empty(hid_t hdf5_group_id, const std::string &hdf5_dset_name) { hid_t res = -1; + // for conduit empty, use an opaque data type with zero size; - hid_t h5_dtype_id = H5Tcreate(H5T_OPAQUE, 1); - hid_t h5_dspace_id = H5Screate(H5S_NULL); + RelayH5THandle h5_dtype_hnd(H5Tcreate(H5T_OPAQUE, 1), + hdf5_group_id, + ref_path); - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dspace_id, - hdf5_group_id, - ref_path, - "Failed to create HDF5 Dataspace"); + h5_dtype_hnd.check_created(); + + RelayH5SHandle h5_dspace_hnd(H5Screate(H5S_NULL), + hdf5_group_id, + ref_path); + h5_dspace_hnd.check_created(); // create new dataset res = H5Dcreate(hdf5_group_id, hdf5_dset_name.c_str(), - 
h5_dtype_id, - h5_dspace_id, + h5_dtype_hnd.id(), + h5_dspace_hnd.id(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -1739,19 +1904,9 @@ create_hdf5_dataset_for_conduit_empty(hid_t hdf5_group_id, "Failed to create HDF5 Dataset " << hdf5_group_id << " " << hdf5_dset_name); - // close our datatype - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Tclose(h5_dtype_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Datatype"); - // close our dataspace - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Sclose(h5_dspace_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Dataspace " - << h5_dspace_id); return res; + // auto clean up of h5_dtype_id_hnd and h5_dspace_id } //----------------------------------------------------------------------------- @@ -1761,16 +1916,13 @@ create_hdf5_group_for_conduit_node(const Node &node, hid_t hdf5_parent_group_id, const std::string &hdf5_new_group_name) { - hid_t h5_gc_plist = H5Pcreate(H5P_GROUP_CREATE); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_gc_plist, - hdf5_parent_group_id, - ref_path, - "Failed to create H5P_GROUP_CREATE property " - << " list"); + RelayH5PHandle h5_gc_plist_hnd(H5Pcreate(H5P_GROUP_CREATE), + hdf5_parent_group_id, + ref_path); + h5_gc_plist_hnd.check_created(); // track creation order - herr_t h5_status = H5Pset_link_creation_order(h5_gc_plist, + herr_t h5_status = H5Pset_link_creation_order(h5_gc_plist_hnd.id(), ( H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) ); CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_status, @@ -1780,7 +1932,7 @@ create_hdf5_group_for_conduit_node(const Node &node, // prefer compact group storage // https://support.hdfgroup.org/HDF5/doc/RM/RM_H5G.html#Group-GroupStyles - h5_status = H5Pset_link_phase_change(h5_gc_plist, + h5_status = H5Pset_link_phase_change(h5_gc_plist_hnd.id(), 32, // max for compact storage 32); // min for dense storage @@ -1807,7 +1959,7 @@ create_hdf5_group_for_conduit_node(const Node &node, } // set hints for meta data about link names - h5_status = H5Pset_est_link_info(h5_gc_plist, + h5_status = H5Pset_est_link_info(h5_gc_plist_hnd.id(), // number of children (unsigned int)num_children, // est name size @@ -1821,7 +1973,7 @@ create_hdf5_group_for_conduit_node(const Node &node, hid_t h5_child_id = H5Gcreate(hdf5_parent_group_id, hdf5_new_group_name.c_str(), H5P_DEFAULT, - h5_gc_plist, + h5_gc_plist_hnd.id(), H5P_DEFAULT); CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_id, @@ -1831,14 +1983,8 @@ create_hdf5_group_for_conduit_node(const Node &node, << " parent: " << hdf5_parent_group_id << " name: " << hdf5_new_group_name); - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Pclose(h5_gc_plist), - hdf5_parent_group_id, - ref_path, - "Failed to close HDF5 H5P_GROUP_CREATE " - << "property list: " - << h5_gc_plist); - return h5_child_id; + // auto cleanup of h5_gc_plist_hnd } @@ -1851,7 +1997,12 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, { DataType dt = node.dtype(); - hid_t h5_dtype_id = conduit_dtype_to_hdf5_dtype(dt,ref_path); + // NOTE: The `hdf5_dset_id` may change, not captured in current design + RelayH5THandle h5_dtype_hnd(conduit_dtype_to_hdf5_dtype(dt,ref_path), + hdf5_dset_id, + ref_path); + h5_dtype_hnd.check_created(); + herr_t h5_status = -1; hsize_t offset = 0; @@ -1871,10 +2022,13 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, } // get dimensions of dset - hid_t dataspace = H5Dget_space(hdf5_dset_id); - hsize_t dataset_dim = H5Sget_simple_extent_npoints(dataspace); + RelayH5SHandle 
h5_dspace_hnd(H5Dget_space(hdf5_dset_id), + hdf5_dset_id, + ref_path); + + hsize_t dataset_dim = H5Sget_simple_extent_npoints(h5_dspace_hnd.id()); hsize_t dataset_max_dims[1]; - H5Sget_simple_extent_dims(dataspace, NULL, dataset_max_dims); + H5Sget_simple_extent_dims(h5_dspace_hnd.id(), NULL, dataset_max_dims); // if the layout is fixed and no offset/stride is supplied, // the entire array is overwriten @@ -1886,7 +2040,7 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, { // write data h5_status = H5Dwrite(hdf5_dset_id, - h5_dtype_id, + h5_dtype_hnd.id(), H5S_ALL, H5S_ALL, H5P_DEFAULT, @@ -1898,7 +2052,7 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, Node n; node.compact_to(n); h5_status = H5Dwrite(hdf5_dset_id, - h5_dtype_id, + h5_dtype_hnd.id(), H5S_ALL, H5S_ALL, H5P_DEFAULT, @@ -1913,7 +2067,6 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, // get the node dset size hsize_t node_size[1] = {(hsize_t) dt.number_of_elements()}; - hid_t nodespace = H5Screate_simple(1, node_size, NULL); hsize_t offsets[1] = {offset}; hsize_t strides[1] = {stride}; @@ -1944,7 +2097,9 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, std::string hdf5_dset_path = std::string(&hdf5_i_buff[0]); // get the hdf5 file ID containing dset - hid_t hdf5_id = H5Iget_file_id(hdf5_dset_id); + + RelayH5FHandle h5_file_hnd(H5Iget_file_id(hdf5_dset_id), + ref_path); // get dset's name and parent group name std::string hdf5_dset_parent_name; @@ -1959,34 +2114,44 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, } // get dset's parent group ID - hid_t hdf5_dset_parent_id = H5Oopen(hdf5_id, - hdf5_dset_parent_name.c_str(), H5P_DEFAULT); + RelayH5OHandle hdf5_dset_parent_hnd(H5Oopen(h5_file_hnd.id(), + hdf5_dset_parent_name.c_str(), + H5P_DEFAULT), + h5_file_hnd.id(), + ref_path); + hdf5_dset_parent_hnd.check_created(); + + // close the old dataset to prevent the old identifier from + // interfering + + CONDUIT_CHECK_HDF5_ERROR_WITH_REF_PATH(H5Oclose(hdf5_dset_id), + ref_path, + "Failed to close HDF5 Object: " << hdf5_dset_id); // delete old dset (space is made inaccessible, lost, // and not reclaimed) - hdf5_remove_path(hdf5_id, hdf5_dset_path); + hdf5_remove_path(h5_file_hnd.id(), hdf5_dset_path); // create new extendible dset Node opts_create; opts_create["offset"] = 0; write_conduit_leaf_to_hdf5_group(dset_to_node, ref_path, - hdf5_dset_parent_id, + hdf5_dset_parent_hnd.id(), hdf5_dset_name, opts_create); - // close the old dataset to prevent the old identifier from - // interfering - H5Oclose(hdf5_dset_id); - H5Dclose(hdf5_dset_parent_id); + hdf5_dset_id = H5Oopen(h5_file_hnd.id(), + hdf5_dset_path.c_str(), + H5P_DEFAULT); + CONDUIT_CHECK_HDF5_ERROR_WITH_REF_PATH(hdf5_dset_id, + ref_path, + "Failed to get handle to new HDF5 Dataset"); - hdf5_dset_id = H5Oopen(hdf5_id, - hdf5_dset_path.c_str(), H5P_DEFAULT); + // auto clean up of hdf5_dset_parent_hnd and h5_file_hnd + h5_dspace_hnd.close(); + h5_dspace_hnd.set_id(H5Dget_space(hdf5_dset_id)); - H5Fclose(hdf5_id); - - H5Sclose(dataspace); - dataspace = H5Dget_space(hdf5_dset_id); } // get the dimensions required to fit the node in the dset @@ -2006,22 +2171,25 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, << hdf5_dset_id); //get new dataspace after extending - H5Sclose(dataspace); - dataspace = H5Dget_space(hdf5_dset_id); + h5_dspace_hnd.close(); + h5_dspace_hnd.set_id(H5Dget_space(hdf5_dset_id)); } // select indices to write to - H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offsets, - strides, node_size, NULL); + 
H5Sselect_hyperslab(h5_dspace_hnd.id(), H5S_SELECT_SET, offsets, + strides, node_size, NULL); + RelayH5SHandle h5_node_dspace_hnd(H5Screate_simple(1, node_size, NULL), + hdf5_dset_id, + ref_path); // if the node is compact, we can write directly from its data ptr if(dt.is_compact()) { // write data h5_status = H5Dwrite(hdf5_dset_id, - h5_dtype_id, - nodespace, - dataspace, + h5_dtype_hnd.id(), + h5_node_dspace_hnd.id(), + h5_dspace_hnd.id(), H5P_DEFAULT, node.data_ptr()); } @@ -2031,16 +2199,15 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, Node n; node.compact_to(n); h5_status = H5Dwrite(hdf5_dset_id, - h5_dtype_id, - nodespace, - dataspace, + h5_dtype_hnd.id(), + h5_node_dspace_hnd.id(), + h5_dspace_hnd.id(), H5P_DEFAULT, n.data_ptr()); } - - H5Sclose(nodespace); + // auto cleanup of h5_node_dspace_hnd } - H5Sclose(dataspace); + // auto cleanup of h5_dspace_hnd and h5_dtype_hnd // check write result CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_status, @@ -2048,8 +2215,6 @@ write_conduit_leaf_to_hdf5_dataset(const Node &node, ref_path, "Failed to write to HDF5 Dataset " << hdf5_dset_id); - - conduit_dtype_to_hdf5_dtype_cleanup(h5_dtype_id); } @@ -2087,8 +2252,8 @@ write_conduit_leaf_to_hdf5_group(const Node &node, // hdf5_dset_name.c_str(), // 0, // NULL); - hid_t h5_child_id = -1; + RelayH5DHandle h5_child_hnd(h5_child_id, hdf5_group_id, ref_path); if( CONDUIT_HDF5_STATUS_OK(h5_info_status) ) { @@ -2098,7 +2263,8 @@ write_conduit_leaf_to_hdf5_group(const Node &node, h5_child_id = H5Dopen(hdf5_group_id, hdf5_dset_name.c_str(), H5P_DEFAULT); - + h5_child_hnd.set_id(h5_child_id); + // custom check for better context CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_id, hdf5_group_id, ref_path, @@ -2116,12 +2282,13 @@ write_conduit_leaf_to_hdf5_group(const Node &node, { extendible = true; } - h5_child_id = create_hdf5_dataset_for_conduit_leaf(node.dtype(), - ref_path, - hdf5_group_id, - hdf5_dset_name, - extendible); + h5_child_id = create_hdf5_dataset_for_conduit_leaf(node.dtype(), + ref_path, + hdf5_group_id, + hdf5_dset_name, + extendible); + h5_child_hnd.set_id(h5_child_id); CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_id, hdf5_group_id, ref_path, @@ -2138,12 +2305,10 @@ write_conduit_leaf_to_hdf5_group(const Node &node, chld_ref_path, h5_child_id, opts); + // note: h5_child_id may have changed + h5_child_hnd.set_id(h5_child_id); - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Dclose(h5_child_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Dataset: " - << h5_child_id); + // auto cleanup of h5_child_hnd } //---------------------------------------------------------------------------// @@ -2169,8 +2334,6 @@ write_conduit_empty_to_hdf5_group(hid_t hdf5_group_id, H5P_DEFAULT); #endif - hid_t h5_child_id = -1; - if( CONDUIT_HDF5_STATUS_OK(h5_info_status) ) { // if it does exist, we assume it is compatible @@ -2180,22 +2343,13 @@ write_conduit_empty_to_hdf5_group(hid_t hdf5_group_id, else { // if the hdf5 dataset does not exist, we need to create it - h5_child_id = create_hdf5_dataset_for_conduit_empty(hdf5_group_id, - ref_path, - hdf5_dset_name); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_id, - hdf5_group_id, - ref_path, - "Failed to create HDF5 Dataset " - << " parent: " << hdf5_group_id - << " name: " << hdf5_dset_name); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Dclose(h5_child_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Dataset: " - << h5_child_id); + RelayH5DHandle 
h5_child_hnd(create_hdf5_dataset_for_conduit_empty(hdf5_group_id, + ref_path, + hdf5_dset_name), + hdf5_group_id, + ref_path); + h5_child_hnd.check_created(); + // auto cleanup of h5_child_hnd } @@ -2283,16 +2437,16 @@ write_conduit_node_children_to_hdf5_group(const Node &node, H5P_DEFAULT); #endif - hid_t h5_child_id = -1; + RelayH5GHandle h5_child_hnd(-1, hdf5_group_id, ref_path); if( CONDUIT_HDF5_STATUS_OK(h5_info_status) ) { // if the hdf5 group exists, open it - h5_child_id = H5Gopen(hdf5_group_id, - child_name.c_str(), - H5P_DEFAULT); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_id, + h5_child_hnd.set_id(H5Gopen(hdf5_group_id, + child_name.c_str(), + H5P_DEFAULT)); + // custom check for better context + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_hnd.id(), hdf5_group_id, ref_path, "Failed to open HDF5 Group " @@ -2302,24 +2456,19 @@ write_conduit_node_children_to_hdf5_group(const Node &node, else { // if the hdf5 group doesn't exist, we need to create it - h5_child_id = create_hdf5_group_for_conduit_node(child, - ref_path, - hdf5_group_id, - child_name); - + h5_child_hnd.set_id(create_hdf5_group_for_conduit_node(child, + ref_path, + hdf5_group_id, + child_name)); + h5_child_hnd.check_created(); } // traverse write_conduit_node_children_to_hdf5_group(child, ref_path, - h5_child_id, + h5_child_hnd.id(), opts); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Gclose(h5_child_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Group " - << h5_child_id); + // auto cleanup of h5_child_hnd } else { @@ -2386,16 +2535,22 @@ write_conduit_hdf5_list_attribute(hid_t hdf5_group_id, int att_value = 1; - hid_t h5_dspace_id = H5Screate(H5S_SCALAR); - - hid_t h5_attr_id = H5Acreate(hdf5_group_id, - conduit_hdf5_list_attr_name.c_str(), - H5T_NATIVE_INT, - h5_dspace_id, - H5P_DEFAULT, - H5P_DEFAULT); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_attr_id, + RelayH5SHandle h5_dspace_hnd(H5Screate(H5S_SCALAR), + hdf5_group_id, + ref_path); + h5_dspace_hnd.check_created(); + + RelayH5AHandle h5_attr_hnd(H5Acreate(hdf5_group_id, + conduit_hdf5_list_attr_name.c_str(), + H5T_NATIVE_INT, + h5_dspace_hnd.id(), + H5P_DEFAULT, + H5P_DEFAULT), + hdf5_group_id, + ref_path); + + // custom check for better context + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_attr_hnd.id(), hdf5_group_id, ref_path, "Failed to create HDF5 Attribute " @@ -2404,7 +2559,7 @@ write_conduit_hdf5_list_attribute(hid_t hdf5_group_id, << conduit_hdf5_list_attr_name.c_str()); - hid_t h5_status = H5Awrite(h5_attr_id, + hid_t h5_status = H5Awrite(h5_attr_hnd.id(), H5T_NATIVE_INT, &att_value); CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_status, @@ -2415,19 +2570,7 @@ write_conduit_hdf5_list_attribute(hid_t hdf5_group_id, << " " << conduit_hdf5_list_attr_name.c_str()); - // close our dataspace - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Sclose(h5_dspace_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Dataspace " - << h5_dspace_id); - - // close our attribute - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Aclose(h5_attr_id), - hdf5_group_id, - ref_path, - "Failed to close HDF5 Attribute " - << h5_attr_id); + // auto cleanup of h5_dspace_hnd and h5_attr_hnd } //---------------------------------------------------------------------------// @@ -2515,8 +2658,57 @@ struct h5_read_opdata // whether to only get metadata bool metadata_only; + + // hold error state + // + // don't let exceptions unwind or hdf5 will leak handles used for traversal + // 
https://support.hdfgroup.org/documentation/hdf5/latest/_r_m.html#cpp_c_api_note + // + // stash error info in our traversal callback, so we can throw + // post traverse + bool error_thrown; + conduit::Error traversal_error; }; +// +// Helper to init our h5_read_opdata struct +// +void init_h5_read_opdata(h5_read_opdata &h5_od, + H5O_info_t &h5_info_buf, + const std::string &ref_path, + bool only_get_metadata, + const Node &opts, + Node &dest) +{ + + // setup linked list tracking that allows us to detect cycles + h5_od.recurs = 0; + h5_od.prev = NULL; +#if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_18_API) + h5_od.token = &h5_info_buf.token; +#else + h5_od.addr = h5_info_buf.addr; +#endif + // attach the pointer to our node + h5_od.node = &dest; + h5_od.opts = &opts; + // keep ref path + h5_od.ref_path = ref_path; + + // whether to only get metadata + if (only_get_metadata) + { + h5_od.metadata_only = true; + } + else + { + h5_od.metadata_only = false; + } + + h5_od.error_thrown = false; + h5_od.traversal_error = conduit::Error(); +} + //---------------------------------------------------------------------------// /// Recursive check for cycles. /// (adapted from: h5ex_g_traverse) @@ -2653,141 +2845,148 @@ h5l_iterate_traverse_op_func(hid_t hdf5_id, /* Type conversion */ struct h5_read_opdata *h5_od = (struct h5_read_opdata*)hdf5_operator_data; - /* - * Get type of the object and display its name and type. - * The name of the object is passed to this function by - * the Library. - */ + // catch any exceptions to avoid stack unwind during traversal + try + { + /* + * Get type of the object and display its name and type. + * The name of the object is passed to this function by + * the Library. + */ #if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_18_API) - h5_status = H5Oget_info_by_name(hdf5_id, - hdf5_path, - &h5_info_buf, - H5O_INFO_ALL, - H5P_DEFAULT); + h5_status = H5Oget_info_by_name(hdf5_id, + hdf5_path, + &h5_info_buf, + H5O_INFO_ALL, + H5P_DEFAULT); #else - h5_status = H5Oget_info_by_name(hdf5_id, - hdf5_path, - &h5_info_buf, - H5P_DEFAULT); + h5_status = H5Oget_info_by_name(hdf5_id, + hdf5_path, + &h5_info_buf, + H5P_DEFAULT); #endif - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_status, - hdf5_id, - h5_od->ref_path, - "Error fetching HDF5 Object info: " - << " parent: " << hdf5_id - << " path:" << hdf5_path) ; + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_status, + hdf5_id, + h5_od->ref_path, + "Error fetching HDF5 Object info: " + << " parent: " << hdf5_id + << " path:" << hdf5_path) ; - std::string chld_ref_path = h5_od->ref_path; - if(chld_ref_path != std::string("/")) - { - chld_ref_path += std::string("/"); - } - chld_ref_path += std::string(hdf5_path); + std::string chld_ref_path = h5_od->ref_path; + if(chld_ref_path != std::string("/")) + { + chld_ref_path += std::string("/"); + } + chld_ref_path += std::string(hdf5_path); - switch (h5_info_buf.type) - { - case H5O_TYPE_GROUP: + switch (h5_info_buf.type) { -#if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_18_API) - /* - * With 1.12, we compare tokens, with the hope this provides - * the same cycle avoidance. - */ - if ( h5_group_check (h5_od, hdf5_id, &h5_info_buf.token) ) -#else - /* - * Check group address against linked list of operator - * data structures. We will always run the check, as the - * reference count cannot be relied upon if there are - * symbolic links, and H5Oget_info_by_name always follows - * symbolic links. 
Alternatively we could use H5Lget_info - * and never recurse on groups discovered by symbolic - * links, however it could still fail if an object's - * reference count was manually manipulated with - * H5Odecr_refcount. - */ - if ( h5_group_check (h5_od, h5_info_buf.addr) ) -#endif + case H5O_TYPE_GROUP: { - // skip cycles in the graph ... + #if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_18_API) + /* + * With 1.12, we compare tokens, with the hope this provides + * the same cycle avoidance. + */ + if ( h5_group_check (h5_od, hdf5_id, &h5_info_buf.token) ) + #else + /* + * Check group address against linked list of operator + * data structures. We will always run the check, as the + * reference count cannot be relied upon if there are + * symbolic links, and H5Oget_info_by_name always follows + * symbolic links. Alternatively we could use H5Lget_info + * and never recurse on groups discovered by symbolic + * links, however it could still fail if an object's + * reference count was manually manipulated with + * H5Odecr_refcount. + */ + if ( h5_group_check (h5_od, h5_info_buf.addr) ) + #endif + { + // skip cycles in the graph ... + } + else + { + RelayH5GHandle h5_group_hnd(H5Gopen(hdf5_id, + hdf5_path, + H5P_DEFAULT), + hdf5_id, + h5_od->ref_path); + // custom check for better context + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_group_hnd.id(), + hdf5_id, + h5_od->ref_path, + "Error opening HDF5 " + << "Group: " + << " parent: " + << hdf5_id + << " path:" + << hdf5_path); + + Node *chld_node_ptr = h5l_iterate_traverse_op_func_get_child( + *h5_od->node, + std::string(hdf5_path)); + + read_hdf5_group_into_conduit_node(h5_group_hnd.id(), + chld_ref_path, + h5_od->metadata_only, + *h5_od->opts, + *chld_node_ptr); + + // autoclean up of h5_group_hnd + } + break; } - else + case H5O_TYPE_DATASET: { - hid_t h5_group_id = H5Gopen(hdf5_id, - hdf5_path, - H5P_DEFAULT); - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_group_id, - hdf5_id, - h5_od->ref_path, - "Error opening HDF5 " - << "Group: " - << " parent: " - << hdf5_id - << " path:" - << hdf5_path); - Node *chld_node_ptr = h5l_iterate_traverse_op_func_get_child( - *h5_od->node, - std::string(hdf5_path)); - - read_hdf5_group_into_conduit_node(h5_group_id, - chld_ref_path, - h5_od->metadata_only, - *h5_od->opts, - *chld_node_ptr); - - // close the group - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Gclose(h5_group_id), + *h5_od->node, + std::string(hdf5_path)); + + // open hdf5 dataset at path + RelayH5DHandle h5_dset_hnd(H5Dopen(hdf5_id, + hdf5_path, + H5P_DEFAULT), + hdf5_id, + h5_od->ref_path); + + // custom check for better context + CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dset_hnd.id(), hdf5_id, h5_od->ref_path, - "Error closing HDF5 " - << "Group: " - << h5_group_id); + "Error opening HDF5 " + << " Dataset: " + << " parent: " + << hdf5_id + << " path:" + << hdf5_path); + read_hdf5_dataset_into_conduit_node(h5_dset_hnd.id(), + chld_ref_path, + h5_od->metadata_only, + *h5_od->opts, + *chld_node_ptr); + + // auto clean up of h5_dset_hnd + break; + } + default: + { + // unsupported } - break; - } - case H5O_TYPE_DATASET: - { - Node *chld_node_ptr = h5l_iterate_traverse_op_func_get_child( - *h5_od->node, - std::string(hdf5_path)); - - // open hdf5 dataset at path - hid_t h5_dset_id = H5Dopen(hdf5_id, - hdf5_path, - H5P_DEFAULT); - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dset_id, - hdf5_id, - h5_od->ref_path, - "Error opening HDF5 " - << " Dataset: " - << " parent: " - << hdf5_id - << " path:" - << 
hdf5_path); - read_hdf5_dataset_into_conduit_node(h5_dset_id, - chld_ref_path, - h5_od->metadata_only, - *h5_od->opts, - *chld_node_ptr); - - // close the dataset - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Dclose(h5_dset_id), - hdf5_id, - h5_od->ref_path, - "Error closing HDF5 " - << " Dataset: " - << h5_dset_id); - break; - } - default: - { - // unsupported } } + catch(const conduit::Error &e) + { + // record that an error was thrown and + // a copy of the details so we can rethrow + // after hdf5 traversal process is complete + h5_od->error_thrown = true; + h5_od->traversal_error = e; + h5_return_val = H5_ITER_ERROR; + } return h5_return_val; } @@ -2827,40 +3026,26 @@ read_hdf5_group_into_conduit_node(hid_t hdf5_group_id, // setup the callback struct we will use for H5Literate struct h5_read_opdata h5_od; - // setup linked list tracking that allows us to detect cycles - h5_od.recurs = 0; - h5_od.prev = NULL; -#if H5_VERSION_GE(1, 12, 0) && !defined(H5_USE_18_API) - h5_od.token = &h5_info_buf.token; -#else - h5_od.addr = h5_info_buf.addr; -#endif - // attach the pointer to our node - h5_od.node = &dest; - h5_od.opts = &opts; - // keep ref path - h5_od.ref_path = ref_path; - // whether to only get metadata - if (only_get_metadata) - { - h5_od.metadata_only = true; - } - else - { - h5_od.metadata_only = false; - } + init_h5_read_opdata(h5_od, + h5_info_buf, + ref_path, + only_get_metadata, + opts, + dest); H5_index_t h5_grp_index_type = H5_INDEX_NAME; // check for creation order index using propertylist - hid_t h5_gc_plist = H5Gget_create_plist(hdf5_group_id); + RelayH5PHandle h5_gc_plist_hnd(H5Gget_create_plist(hdf5_group_id), + hdf5_group_id, + ref_path); - if( CONDUIT_HDF5_VALID_ID(h5_gc_plist) ) + if( CONDUIT_HDF5_VALID_ID(h5_gc_plist_hnd.id()) ) { unsigned int h5_gc_flags = 0; - h5_status = H5Pget_link_creation_order(h5_gc_plist, + h5_status = H5Pget_link_creation_order(h5_gc_plist_hnd.id(), &h5_gc_flags); // first make sure we have the link creation order plist @@ -2873,17 +3058,13 @@ read_hdf5_group_into_conduit_node(hid_t hdf5_group_id, h5_grp_index_type = H5_INDEX_CRT_ORDER; } } - - CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Pclose(h5_gc_plist), - hdf5_group_id, - ref_path, - "Failed to close HDF5 " - << "H5P_GROUP_CREATE " - << "property list: " - << h5_gc_plist); } + // auto cleanup of h5_gc_plist_hnd + // use H5_ITER_ERROR + // don't let exceptions unwind or hdf5 will leak handles used for traversal + // https://support.hdfgroup.org/documentation/hdf5/latest/_r_m.html#cpp_c_api_note // use H5Literate to traverse h5_status = H5Literate(hdf5_group_id, @@ -2893,6 +3074,12 @@ read_hdf5_group_into_conduit_node(hid_t hdf5_group_id, h5l_iterate_traverse_op_func, (void *) &h5_od); + if(h5_status == H5_ITER_ERROR && h5_od.error_thrown) + { + throw h5_od.traversal_error; + } + + // check for an error that was not an exception CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_status, hdf5_group_id, ref_path, @@ -2957,8 +3144,10 @@ calculate_readsize(index_t_array readsize, index_t rank, //---------------------------------------------------------------------------// void -fill_dataset_opts(const std::string & ref_path, const Node & inopts, - hid_t dataspace_id, Node & filled_opts) +fill_dataset_opts(const std::string &ref_path, + const Node & inopts, + hid_t dataspace_id, + Node &filled_opts) { // Intent here is to do a deep copy, since opts is a const ref @@ -3040,33 +3229,39 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id, const Node &opts, Node &dest) { - hid_t 
-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dspace_id,
+
+    RelayH5SHandle h5_dspace_hnd(H5Dget_space(hdf5_dset_id),
+                                 hdf5_dset_id,
+                                 ref_path);
+    // custom check for better context
+    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dspace_hnd.id(),
                                                     hdf5_dset_id,
                                                     ref_path,
                                                     "Error reading HDF5 Dataspace: "
-                                                    << hdf5_dset_id);
+                                                    << h5_dspace_hnd.id());

     // check for empty case
-    if(H5Sget_simple_extent_type(h5_dspace_id) == H5S_NULL)
+    if(H5Sget_simple_extent_type(h5_dspace_hnd.id()) == H5S_NULL)
     {
         // change to empty
         dest.reset();
     }
     else
     {
-        hid_t h5_dtype_id = H5Dget_type(hdf5_dset_id);
-
-        CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dtype_id,
+        RelayH5THandle h5_dtype_hnd(H5Dget_type(hdf5_dset_id),
+                                    hdf5_dset_id,
+                                    ref_path);
+        // custom check for better context
+        CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dtype_hnd.id(),
                                                         hdf5_dset_id,
                                                         ref_path,
                                                         "Error reading HDF5 Datatype: "
                                                         << hdf5_dset_id);

-        hid_t  h5_status = 0;
+        hid_t h5_status = 0;
         Node filled_opts;
-        fill_dataset_opts(ref_path, opts, h5_dspace_id, filled_opts);
+        fill_dataset_opts(ref_path, opts, h5_dspace_hnd.id(), filled_opts);
         Node& slab_params = filled_opts["slabparams"];

         index_t rank = slab_params["rank"].to_long_long();
@@ -3093,7 +3288,7 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id,
         else
         {
             // Note: string case is handed properly in hdf5_dtype_to_conduit_dtype
-            DataType dt = hdf5_dtype_to_conduit_dtype(h5_dtype_id,
+            DataType dt = hdf5_dtype_to_conduit_dtype(h5_dtype_hnd.id(),
                                                       readsize,
                                                       rank,
                                                       ref_path);
@@ -3112,43 +3307,28 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id,
                 dt.set_endianness(Endianness::machine_default());

                 // clean up our old handle
-                CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Tclose(h5_dtype_id),
-                                                                hdf5_dset_id,
-                                                                ref_path,
-                                                                "Error closing HDF5 Datatype: "
-                                                                << h5_dtype_id);
+                h5_dtype_hnd.close();

                 // get ref to standard variant of this dtype
-                h5_dtype_id = conduit_dtype_to_hdf5_dtype(dt,
-                                                          ref_path);
-
-                CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dtype_id,
-                                                                hdf5_dset_id,
-                                                                ref_path,
-                                                                "Error creating HDF5 Datatype");
-
-                // copy since the logic after read will cleanup
-                h5_dtype_id = H5Tcopy(h5_dtype_id);
-                CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_dtype_id,
-                                                                hdf5_dset_id,
-                                                                ref_path,
-                                                                "Error copying HDF5 Datatype");
-                // cleanup our ref from conduit_dtype_to_hdf5_dtype if necessary
-                conduit_dtype_to_hdf5_dtype_cleanup(h5_dtype_id);
+                h5_dtype_hnd.set_id(conduit_dtype_to_hdf5_dtype(dt,
+                                                                ref_path));
+                h5_dtype_hnd.check_created();
             }

             hsize_t node_size[1] = {readtotal};

-            hid_t nodespace = H5Screate_simple(1,node_size,NULL);
-            hid_t dataspace = H5Dget_space(hdf5_dset_id);
+            RelayH5SHandle h5_node_dspace_hnd(H5Screate_simple(1,node_size,NULL),
+                                              hdf5_dset_id,
+                                              ref_path);
+            h5_node_dspace_hnd.check_created();

             // select hyperslab
-            H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride,
-                                readsize, NULL);
+            H5Sselect_hyperslab(h5_dspace_hnd.id(), H5S_SELECT_SET, offset, stride,
+                                readsize, NULL);

             // Don't check for errors here, because H5Sselect_hyperslab
             // returns -1 (error) if dataspace refers to a scalar.
             // check for string special case, H5T_VARIABLE string
-            if( H5Tis_variable_str(h5_dtype_id) )
+            if( H5Tis_variable_str(h5_dtype_hnd.id()) )
             {
                 //special case for reading variable string data
                 // hdf5 reads the data onto its heap, and
@@ -3156,9 +3336,9 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id,
                 char *read_ptr[1] = {NULL};

                 h5_status = H5Dread(hdf5_dset_id,
-                                    h5_dtype_id,
-                                    nodespace,
-                                    dataspace,
+                                    h5_dtype_hnd.id(),
+                                    h5_node_dspace_hnd.id(),
+                                    h5_dspace_hnd.id(),
                                     H5P_DEFAULT,
                                     read_ptr);

@@ -3187,9 +3367,9 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id,
                 // we can read directly from hdf5 dataset if compact
                 // & compatible
                 h5_status = H5Dread(hdf5_dset_id,
-                                    h5_dtype_id,
-                                    nodespace,
-                                    dataspace,
+                                    h5_dtype_hnd.id(),
+                                    h5_node_dspace_hnd.id(),
+                                    h5_dspace_hnd.id(),
                                     H5P_DEFAULT,
                                     dest.data_ptr());
             }
@@ -3202,18 +3382,16 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id,
                 // reading will not unless it's already compatible and compact.
                 Node n_tmp(dt);
                 h5_status = H5Dread(hdf5_dset_id,
-                                    h5_dtype_id,
-                                    nodespace,
-                                    dataspace,
+                                    h5_dtype_hnd.id(),
+                                    h5_node_dspace_hnd.id(),
+                                    h5_dspace_hnd.id(),
                                     H5P_DEFAULT,
                                     n_tmp.data_ptr());

                 // copy out to our dest
                 dest.set(n_tmp);
             }
-
-            H5Sclose(nodespace);
-            H5Sclose(dataspace);
+            // auto cleanup of h5_node_dspace_hnd
         }

         if(opts.dtype().is_empty())
@@ -3236,21 +3414,9 @@ read_hdf5_dataset_into_conduit_node(hid_t hdf5_dset_id,
                           << "HDF5 dataset size: " << nelems);
         }
-
-        CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Tclose(h5_dtype_id),
-                                                        hdf5_dset_id,
-                                                        ref_path,
-                                                        "Error closing HDF5 Datatype: "
-                                                        << h5_dtype_id);
     }
-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Sclose(h5_dspace_id),
-                                                    hdf5_dset_id,
-                                                    ref_path,
-                                                    "Error closing HDF5 Dataspace: "
-                                                    << h5_dspace_id);
-
+    // auto cleanup of h5_dspace_hnd
 }

 //---------------------------------------------------------------------------//
@@ -3379,7 +3545,8 @@ create_hdf5_file_access_plist()
 // https://forum.hdfgroup.org/t/seconding-the-request-for-h5pset-libver-bounds-1-8-x-file-compat-option/4056
-// so only enable H5F_LIBVER_LATEST if we are using hdf5 1.8
-    if(major_num == 1 && minor_num >= 8)
+// so only enable H5F_LIBVER_LATEST if we are using hdf5 1.8 or newer
+    if( (major_num == 1 && minor_num >= 8) ||
+        major_num > 1 )
     {
 #if H5_VERSION_GE(1, 10, 2)
         if(HDF5Options::libver == "default" ||
@@ -3404,6 +3571,13 @@ create_hdf5_file_access_plist()
         {
             h5_status = H5Pset_libver_bounds(h5_fa_props, H5F_LIBVER_V18, H5F_LIBVER_V114);
         }
+#endif
+// nested case for hdf5 >= 2.0
+#if H5_VERSION_GE(2, 0, 0)
+        else if(HDF5Options::libver == "v200")
+        {
+            h5_status = H5Pset_libver_bounds(h5_fa_props, H5F_LIBVER_V200, H5F_LIBVER_V200);
+        }
 #endif
         else if(HDF5Options::libver == "latest")
         {
@@ -3488,29 +3662,24 @@ hdf5_create_file(const std::string &file_path)
     // disable hdf5 error stack
     HDF5ErrorStackSuppressor supress_hdf5_errors;

-    hid_t h5_fc_plist = create_hdf5_file_create_plist();
-    hid_t h5_fa_plist = create_hdf5_file_access_plist();
+    RelayH5PHandle h5_fc_plist_hnd(create_hdf5_file_create_plist(),file_path);
+    h5_fc_plist_hnd.check_created();
+    RelayH5PHandle h5_fa_plist_hnd(create_hdf5_file_access_plist(),file_path);
+    h5_fa_plist_hnd.check_created();

     // open the hdf5 file for writing
     hid_t h5_file_id = H5Fcreate(file_path.c_str(),
                                  H5F_ACC_TRUNC,
-                                 h5_fc_plist,
-                                 h5_fa_plist);
+                                 h5_fc_plist_hnd.id(),
+                                 h5_fa_plist_hnd.id());

     CONDUIT_CHECK_HDF5_ERROR(h5_file_id,
                              "Error opening HDF5 file for writing: "
                              << file_path);

-    CONDUIT_CHECK_HDF5_ERROR(H5Pclose(h5_fc_plist),
-                             "Failed to close HDF5 H5P_GROUP_CREATE "
-                             << "property list: " << h5_fc_plist);
-
-    CONDUIT_CHECK_HDF5_ERROR(H5Pclose(h5_fa_plist),
-                             "Failed to close HDF5 H5P_FILE_ACCESS "
-                             << "property list: " << h5_fa_plist);
-
     return h5_file_id;
+    // auto cleanup of h5_fc_plist_hnd and h5_fa_plist_hnd
     // enable hdf5 error stack
 }

@@ -3834,27 +4003,35 @@ hdf5_write(const Node &node,
     // disable hdf5 error stack
     HDF5ErrorStackSuppressor supress_hdf5_errors;

-    hid_t h5_file_id = -1;
+    // file handle for auto cleanup
+    RelayH5FHandle h5_file_hnd(-1,file_path);
+
     if(append && utils::is_file(file_path))
     {
         // open existing hdf5 file for read + write
-        h5_file_id = hdf5_open_file_for_read_write(file_path);
+        h5_file_hnd.set_id(hdf5_open_file_for_read_write(file_path));
     }
     else // trunc
     {
         // open the hdf5 file for writing
-        h5_file_id = hdf5_create_file(file_path);
+        h5_file_hnd.set_id(hdf5_create_file(file_path));
     }

+    h5_file_hnd.check_created();
+
+    hid_t h5_file_id = h5_file_hnd.id();
+
     hdf5_write(node,
                h5_file_id,
                hdf5_path,
                opts);

-    // close the hdf5 file
-    CONDUIT_CHECK_HDF5_ERROR(H5Fclose(h5_file_id),
-                             "Error closing HDF5 file: " << file_path);
+    // note: h5_file_id won't change in this case, but the general
+    // write api supports the id changing, so still go through the motions
+    h5_file_hnd.set_id(h5_file_id);
+    // auto cleanup of h5_file_hnd

     // restore hdf5 error stack
 }

@@ -3875,23 +4052,19 @@ hdf5_open_file_for_read(const std::string &file_path)
     // disable hdf5 error stack
     HDF5ErrorStackSuppressor supress_hdf5_errors;

-    hid_t h5_fa_plist = create_hdf5_file_access_plist();
+    RelayH5PHandle h5_fa_plist_hnd(create_hdf5_file_access_plist(),file_path);

     // open the hdf5 file for reading
     hid_t h5_file_id = H5Fopen(file_path.c_str(),
                                H5F_ACC_RDONLY,
-                               h5_fa_plist);
+                               h5_fa_plist_hnd.id());

     CONDUIT_CHECK_HDF5_ERROR(h5_file_id,
                              "Error opening HDF5 file for read only access: "
                              << file_path);

-    CONDUIT_CHECK_HDF5_ERROR(H5Pclose(h5_fa_plist),
-                             "Failed to close HDF5 H5P_FILE_ACCESS "
-                             << "property list: " << h5_fa_plist);
-
     return h5_file_id;
-
+    // auto cleanup of h5_fa_plist_hnd
     // restore hdf5 error stack
 }

@@ -3902,23 +4075,19 @@ hdf5_open_file_for_read_write(const std::string &file_path)
     // disable hdf5 error stack
     HDF5ErrorStackSuppressor supress_hdf5_errors;

-    hid_t h5_fa_plist = create_hdf5_file_access_plist();
+    RelayH5PHandle h5_fa_plist_hnd(create_hdf5_file_access_plist(),file_path);

     // open the hdf5 file for read + write
     hid_t h5_file_id = H5Fopen(file_path.c_str(),
                                H5F_ACC_RDWR,
-                               h5_fa_plist);
+                               h5_fa_plist_hnd.id());

     CONDUIT_CHECK_HDF5_ERROR(h5_file_id,
                              "Error opening HDF5 file for read + write access: "
                              << file_path);

-    CONDUIT_CHECK_HDF5_ERROR(H5Pclose(h5_fa_plist),
-                             "Failed to close HDF5 H5P_FILE_ACCESS "
-                             << "property list: " << h5_fa_plist);
-
     return h5_file_id;
-
+    // auto cleanup of h5_fa_plist_hnd
     // restore hdf5 error stack
 }

@@ -3943,29 +4112,27 @@ hdf5_read(hid_t hdf5_id,
     // disable hdf5 error stack
     HDF5ErrorStackSuppressor supress_hdf5_errors;

-    // get hdf5 object at path, then call read_hdf5_tree_into_conduit_node
-    hid_t h5_child_obj = H5Oopen(hdf5_id,
-                                 hdf5_path.c_str(),
-                                 H5P_DEFAULT);
-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_obj,
+    // get hdf5 object at path, then call read_hdf5_tree_into_conduit_node
+    RelayH5OHandle h5_child_obj_hnd(H5Oopen(hdf5_id,
+                                            hdf5_path.c_str(),
+                                            H5P_DEFAULT),
+                                    hdf5_id,
+                                    hdf5_path);
+    // custom check for better context
+    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_obj_hnd.id(),
                                                     hdf5_id,
                                                     hdf5_path,
                                                     "Failed to fetch HDF5 object from: "
                                                     << hdf5_id << ":" << hdf5_path);
-    read_hdf5_tree_into_conduit_node(h5_child_obj,
+    read_hdf5_tree_into_conduit_node(h5_child_obj_hnd.id(),
                                      hdf5_path,
                                      false,
                                      opts,
                                      dest);

-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Oclose(h5_child_obj),
-                                                    hdf5_id,
-                                                    hdf5_path,
-                                                    "Failed to close HDF5 Object: "
-                                                    << h5_child_obj);
-
+    // auto cleanup of h5_child_obj_hnd
     // restore hdf5 error stack
 }

@@ -3989,16 +4156,15 @@ hdf5_read(const std::string &file_path,
     // note: hdf5 error stack is suppressed in these calls

     // open the hdf5 file for reading
-    hid_t h5_file_id = hdf5_open_file_for_read(file_path);
+    RelayH5FHandle h5_file_hnd(hdf5_open_file_for_read(file_path),
+                               file_path);

-    hdf5_read(h5_file_id,
+    hdf5_read(h5_file_hnd.id(),
               hdf5_path,
               opts,
               node);

-    // close the hdf5 file
-    CONDUIT_CHECK_HDF5_ERROR(H5Fclose(h5_file_id),
-                             "Error closing HDF5 file: " << file_path);
+    // auto cleanup of h5_file_hnd
 }

//----------------------------------------------------------------------------//
@@ -4089,28 +4255,25 @@ hdf5_read_info(hid_t hdf5_id,
     HDF5ErrorStackSuppressor supress_hdf5_errors;

     // get hdf5 object at path, then call read_hdf5_tree_into_conduit_node
-    hid_t h5_child_obj = H5Oopen(hdf5_id,
-                                 hdf5_path.c_str(),
-                                 H5P_DEFAULT);
-
-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_obj,
+    RelayH5OHandle h5_child_obj_hnd(H5Oopen(hdf5_id,
+                                            hdf5_path.c_str(),
+                                            H5P_DEFAULT),
+                                    hdf5_id,
+                                    hdf5_path);
+    // custom check for better context
+    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_child_obj_hnd.id(),
                                                     hdf5_id,
                                                     hdf5_path,
                                                     "Failed to fetch HDF5 object from: "
                                                     << hdf5_id << ":" << hdf5_path);

-    read_hdf5_tree_into_conduit_node(h5_child_obj,
+    read_hdf5_tree_into_conduit_node(h5_child_obj_hnd.id(),
                                      hdf5_path,
                                      true,
                                      opts,
                                      dest);

-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Oclose(h5_child_obj),
-                                                    hdf5_id,
-                                                    hdf5_path,
-                                                    "Failed to close HDF5 Object: "
-                                                    << h5_child_obj);
-
+    // auto cleanup of h5_child_obj_hnd
     // restore hdf5 error stack
 }

@@ -4134,16 +4297,17 @@ hdf5_read_info(const std::string &file_path,
     // note: hdf5 error stack is suppressed in these calls

     // open the hdf5 file for reading
-    hid_t h5_file_id = hdf5_open_file_for_read(file_path);
+    RelayH5FHandle h5_file_hnd(hdf5_open_file_for_read(file_path),
+                               file_path);
+    h5_file_hnd.check_created();

-    hdf5_read_info(h5_file_id,
+    hdf5_read_info(h5_file_hnd.id(),
                    hdf5_path,
                    opts,
                    node);

-    // close the hdf5 file
-    CONDUIT_CHECK_HDF5_ERROR(H5Fclose(h5_file_id),
-                             "Error closing HDF5 file: " << file_path);
+    // auto cleanup of h5_file_hnd
 }

//----------------------------------------------------------------------------//
@@ -4338,12 +4502,14 @@ void hdf5_group_list_child_names(hid_t hdf5_id,
     // https://support.hdfgroup.org/ftp/HDF5/examples/examples-by-api/hdf5-examples/1_10/C/H5G/h5ex_g_corder.c
     //

+    RelayH5GHandle h5_group_hnd(H5Gopen(hdf5_id,
+                                        hdf5_path.c_str(),
+                                        H5P_DEFAULT),
+                                hdf5_id,
+                                hdf5_path);
-    hid_t h5_group_id = H5Gopen(hdf5_id,
-                                hdf5_path.c_str(),
-                                H5P_DEFAULT);
-
-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_group_id,
+    // custom check for better context
+    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(h5_group_hnd.id(),
                                                     hdf5_id,
                                                     "",
                                                     "Error opening HDF5 "
@@ -4355,7 +4521,7 @@ void hdf5_group_list_child_names(hid_t hdf5_id,

     // get group info so we can find the # of children
     H5G_info_t h5_group_info;
-    h5_status = H5Gget_info(h5_group_id, &h5_group_info);
+    h5_status = H5Gget_info(h5_group_hnd.id(), &h5_group_info);

     // buffer for child names, if the names are bigger than this
     // buffer can hold, we will fall back to a malloc
@@ -4366,7 +4532,7 @@ void hdf5_group_list_child_names(hid_t hdf5_id,
         char *name_buff_ptr = name_buff;
         char *name_buff_tmp = NULL;

         // Get size of name,
-        int name_size = H5Lget_name_by_idx(h5_group_id, ".",
+        int name_size = H5Lget_name_by_idx(h5_group_hnd.id(), ".",
                                            H5_INDEX_CRT_ORDER,
                                            H5_ITER_INC,
                                            i,
@@ -4378,7 +4544,7 @@ void hdf5_group_list_child_names(hid_t hdf5_id,
         {
             // error, not valid
             std::string hdf5_err_ref_path;
-            hdf5_ref_path_with_filename(h5_group_id,
+            hdf5_ref_path_with_filename(h5_group_hnd.id(),
                                         hdf5_path,
                                         hdf5_err_ref_path);

@@ -4396,7 +4562,7 @@ void hdf5_group_list_child_names(hid_t hdf5_id,
             name_buff_ptr = name_buff_tmp;
         }

-        name_size = H5Lget_name_by_idx(h5_group_id, ".",
+        name_size = H5Lget_name_by_idx(h5_group_hnd.id(), ".",
                                        H5_INDEX_CRT_ORDER,
                                        H5_ITER_INC,
                                        i,
@@ -4413,12 +4579,7 @@ void hdf5_group_list_child_names(hid_t hdf5_id,
         }
     }

-    CONDUIT_CHECK_HDF5_ERROR_WITH_FILE_AND_REF_PATH(H5Gclose(h5_group_id),
-                                                    hdf5_id,
-                                                    "",
-                                                    "Failed to close HDF5 Group "
-                                                    << h5_group_id);
-
+    // auto cleanup of h5_group_hnd
     // restore hdf5 error stack
 }
 //-----------------------------------------------------------------------------
diff --git a/src/tests/relay/t_relay_io_hdf5.cpp b/src/tests/relay/t_relay_io_hdf5.cpp
index a1d802120..94c0b932b 100644
--- a/src/tests/relay/t_relay_io_hdf5.cpp
+++ b/src/tests/relay/t_relay_io_hdf5.cpp
@@ -34,7 +34,6 @@ check_h5_open_ids()
     return nids;
 }

-
 //-----------------------------------------------------------------------------
 // helper to create an HDF5 dataset
 herr_t
@@ -70,8 +69,6 @@ create_hdf5_nd_dataset(std::string fname, std::string path, int rank, int const
     return status;
 }

-
-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_by_file_name)
 {
@@ -122,9 +119,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_by_file_name)
     EXPECT_EQ(n_load_generic["myobj/b"].as_uint32(), b_val);
     EXPECT_EQ(n_load_generic["myobj/c"].as_uint32(), c_val);

-
-
-
     // save load from generic io interface
     io::save(n_load_generic["myobj"],"tout_hdf5_wr_generic.hdf5:myobj");

@@ -189,7 +183,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_special_paths)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_string)
 {
@@ -221,7 +214,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_string)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_array)
 {
@@ -256,7 +248,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_array)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_read_2D_array)
 {
@@ -349,7 +340,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_read_2D_array)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_hdf5_dataset_handle)
 {
@@ -418,7 +408,6 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_hdf5_dataset_handle)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_extendible_hdf5_dataset_handle_with_offset)
 {
@@ -447,13 +436,11 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_extendible_hdf5_datas
     /*
      * Modify dataset creation properties, i.e. enable chunking.
      */
-    hid_t cparms;
     hsize_t chunk_dims[1] = {1};
-    cparms = H5Pcreate (H5P_DATASET_CREATE);
+    hid_t cparms = H5Pcreate (H5P_DATASET_CREATE);
     H5Pset_chunk(cparms, 1, chunk_dims);

-
     // create new dataset
     hid_t h5_dset_id = H5Dcreate1(h5_file_id,
                                   "mydata",
@@ -481,6 +468,7 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_extendible_hdf5_datas

     Node n_read, opts_read;
     io::hdf5_read_info(h5_dset_id,opts_read,n_read);
+
     EXPECT_EQ(4,(int) n_read["num_elements"].to_value());

     io::hdf5_read(h5_dset_id,opts_read,n_read);
@@ -555,12 +543,10 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_extendible_hdf5_datas
     H5Dclose(h5_dset_id);
     H5Fclose(h5_file_id);

-    // TODO AUDIT
-    // // make sure we aren't leaking
-    //EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
+    // make sure we aren't leaking
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_fixed_hdf5_dataset_handle_with_offset)
 {
@@ -575,8 +561,6 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_fixed_hdf5_dataset_ha
                                   H5P_DEFAULT);

     // create a dataset for a 16-bit signed integer array with 2 elements
-
-
     hid_t h5_dtype = H5T_NATIVE_SHORT;

     hsize_t num_eles = 2;
@@ -661,6 +645,7 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_fixed_hdf5_dataset_ha
     opts_read["offset"] = 0;
     opts_read["stride"] = 1;
     io::hdf5_read(h5_dset_id,opts_read,n_read);
+
     // check values of data
     read_vals = n_read.value();
     EXPECT_EQ(-1,read_vals[0]);
@@ -686,13 +671,10 @@ TEST(conduit_relay_io_hdf5, write_and_read_conduit_leaf_to_fixed_hdf5_dataset_ha
     H5Dclose(h5_dset_id);
     H5Fclose(h5_file_id);

-    // TODO AUDIT!
     // make sure we aren't leaking
-    // EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, write_conduit_object_to_hdf5_group_handle_with_offset)
 {
@@ -845,13 +827,10 @@ TEST(conduit_relay_io_hdf5, write_conduit_object_to_hdf5_group_handle_with_offse
     H5Gclose(h5_group_id);
     H5Fclose(h5_file_id);

-    // TODO AUDIT
     // make sure we aren't leaking
-    //EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, write_conduit_object_to_hdf5_group_handle)
 {
@@ -987,7 +966,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_by_file_handle)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_to_existing_dset)
 {
@@ -1165,7 +1143,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_leaf_arrays)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_empty)
 {
@@ -1189,7 +1166,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_empty)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, hdf5_write_zero_sized_leaf)
 {
@@ -1216,7 +1192,6 @@ TEST(conduit_relay_io_hdf5, hdf5_write_zero_sized_leaf)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_childless_object)
 {
@@ -1240,8 +1215,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_childless_object)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_test_write_incompat)
 {
@@ -1286,7 +1259,6 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_test_write_incompat)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, auto_endian)
 {
@@ -1371,7 +1343,6 @@ TEST(conduit_relay_io_hdf5, hdf5_path_exists)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, hdf5_create_append_methods)
 {
@@ -1444,8 +1415,6 @@ TEST(conduit_relay_io_hdf5, hdf5_create_append_methods)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, hdf5_create_open_methods)
 {
@@ -1488,8 +1457,7 @@ TEST(conduit_relay_io_hdf5, hdf5_create_open_methods)
     // make sure we aren't leaking
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }
-//
-//
+
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_save_generic_options)
 {
@@ -1530,11 +1498,11 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_save_generic_options)
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

-
-
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_save_libver)
 {
+    // get objects in flight already
+    int DO_NO_HARM = check_h5_open_ids();

     Node n, opts;
@@ -1546,6 +1514,11 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_save_libver)

     conduit::relay::io::hdf5_set_options(opts);

+    // show full set of options after set
+    Node curr_opts;
+    conduit::relay::io::hdf5_options(curr_opts);
+    std::cout << curr_opts.to_yaml() << std::endl;
+
     utils::remove_path_if_exists(tout);
     // bad libver
     EXPECT_THROW(io::save(n,tout, "hdf5"),Error);
@@ -1556,6 +1529,9 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_save_libver)
     io::save(n,tout, "hdf5");

     EXPECT_TRUE(utils::is_file(tout));
+
+    // make sure we aren't leaking
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

 //-----------------------------------------------------------------------------
@@ -1965,7 +1941,7 @@ TEST(conduit_relay_io_hdf5, test_read_various_string_style)
     // make sure we aren't leaking
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }
-//
+
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_string_compress)
 {
@@ -2005,8 +1981,8 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_string_compress)
     // make sure we aren't leaking
     EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }
-//
-//
+
+
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_list)
 {
@@ -2157,9 +2133,8 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_list_with_offset)
     EXPECT_EQ(4,read_vals[1]);
     EXPECT_EQ(2, read_vals.number_of_elements());

-    // TODO AUDIT!
     // make sure we aren't leaking
-    // EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

 //-----------------------------------------------------------------------------
@@ -2235,9 +2210,8 @@ TEST(conduit_relay_io_hdf5, test_ref_path_error_msg)
         EXPECT_EQ(count,1);
     }

-    // // TODO AUDIT!
-    // //make sure we aren't leaking
-    // EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
+    // make sure we aren't leaking
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

 //-----------------------------------------------------------------------------
@@ -2266,6 +2240,9 @@ TEST(conduit_relay_io_hdf5, wrong_proto_message)
 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_error_writing_incompat_leaf)
 {
+    // get objects in flight already
+    int DO_NO_HARM = check_h5_open_ids();
+
     Node n;
     n["thing"].set(42);
     bool err_occured = false;
@@ -2285,11 +2262,15 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_error_writing_incompat_leaf)
     }

     EXPECT_TRUE(err_occured);
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

 //-----------------------------------------------------------------------------
 TEST(conduit_relay_io_hdf5, conduit_hdf5_error_writing_leaf_to_root)
 {
+    // get objects in flight already
+    int DO_NO_HARM = check_h5_open_ids();
+
     Node n;
     n.set(42);
     bool err_occured = false;
@@ -2309,6 +2290,7 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_error_writing_leaf_to_root)
     }

     EXPECT_TRUE(err_occured);
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

@@ -2424,10 +2406,15 @@ TEST(conduit_relay_io_hdf5, conduit_hdf5_write_read_zfp_1d)
     // expect int diff to be in between -2 and 0
     EXPECT_TRUE( (-2 <= vsi_diff.max()) && (vsi_diff.max() <=0) );

-    // NOTE: This fails with 2 leaked objects
     // make sure we aren't leaking
-    // EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
+    EXPECT_EQ(check_h5_open_ids(),DO_NO_HARM);
 }

+//-----------------------------------------------------------------------------
+TEST(conduit_relay_io_hdf5, conduit_hdf5_ZZZZ_final_handle_check)
+{
+    // get objects in flight already
+    int DO_NO_HARM = check_h5_open_ids();
+    // after all prior tests have run, expect no hdf5 handles left in flight
+    EXPECT_EQ(DO_NO_HARM, 0);
+}
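
Editor's note, not part of the patch: the RelayH5*Handle types used throughout the hunks above (RelayH5FHandle, RelayH5PHandle, RelayH5SHandle, RelayH5THandle, RelayH5OHandle, RelayH5GHandle) are not defined anywhere in this diff. Below is a minimal, self-contained sketch of the RAII idiom they appear to implement, inferred only from the id()/set_id()/close()/check_created() calls visible at the call sites. The class name and error handling here are illustrative assumptions, not Conduit's actual implementation; the point is that the destructor runs during stack unwinding, so the CONDUIT_CHECK_* macros can throw without leaking HDF5 handles.

    // sketch only: hypothetical RAII wrapper for a datatype hid_t;
    // the patch's real classes wrap other handle kinds the same way
    #include <hdf5.h>
    #include <stdexcept>

    class ScopedH5DatatypeHandle
    {
    public:
        explicit ScopedH5DatatypeHandle(hid_t id) : m_id(id) {}
        ~ScopedH5DatatypeHandle() { close(); }

        // non-copyable: exactly one owner per hdf5 handle
        ScopedH5DatatypeHandle(const ScopedH5DatatypeHandle &) = delete;
        ScopedH5DatatypeHandle &operator=(const ScopedH5DatatypeHandle &) = delete;

        hid_t id() const { return m_id; }

        // release the held handle, either early or on scope exit
        void close()
        {
            if(m_id >= 0)
            {
                H5Tclose(m_id);
                m_id = -1;
            }
        }

        // adopt a new handle, closing any handle currently held
        void set_id(hid_t id) { close(); m_id = id; }

        // mirror of the patch's check_created(): throw if creation failed;
        // the destructor still runs during unwind, so nothing leaks
        void check_created() const
        {
            if(m_id < 0)
                throw std::runtime_error("failed to create HDF5 datatype handle");
        }

    private:
        hid_t m_id;
    };

    int main()
    {
        // the handle is closed when this scope exits, even if an
        // exception is thrown between creation and use
        ScopedH5DatatypeHandle dtype_hnd(H5Tcopy(H5T_NATIVE_INT));
        dtype_hnd.check_created();
        // ... use dtype_hnd.id() with H5Dread / H5Dwrite, etc. ...
        return 0;
    }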
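
Editor's note, not part of the patch: the try/catch added to h5l_iterate_traverse_op_func pairs with the rethrow after H5Literate in read_hdf5_group_into_conduit_node. A C++ exception must not unwind through HDF5's C iteration machinery (see the cpp_c_api_note link cited in the patch), so the callback records the error, returns H5_ITER_ERROR, and the caller rethrows once HDF5 has finished cleaning up its traversal handles. The sketch below shows that idiom in isolation; TraverseState, demo_op_func, and demo_traverse are hypothetical names, and std::runtime_error stands in for conduit::Error.

    #include <hdf5.h>
    #include <stdexcept>
    #include <string>

    // state shared with the C callback; plays the role of the patch's
    // h5_read_opdata fields error_thrown / traversal_error
    struct TraverseState
    {
        bool        error_thrown = false;
        std::string error_msg;
    };

    // H5Literate callback: an exception must never escape it, or hdf5
    // would leak the handles it uses internally for traversal
    static herr_t demo_op_func(hid_t group_id,
                               const char *name,
                               const H5L_info_t *info,
                               void *op_data)
    {
        TraverseState *state = static_cast<TraverseState*>(op_data);
        try
        {
            // ... per-link work that may throw goes here ...
            (void)group_id; (void)name; (void)info;
            return H5_ITER_CONT;
        }
        catch(const std::exception &e)
        {
            // capture the error and stop iteration cleanly
            state->error_thrown = true;
            state->error_msg = e.what();
            return H5_ITER_ERROR;
        }
    }

    // after H5Literate returns, rethrow on the C++ side
    void demo_traverse(hid_t loc_id)
    {
        TraverseState state;
        herr_t status = H5Literate(loc_id,
                                   H5_INDEX_NAME,
                                   H5_ITER_INC,
                                   NULL,
                                   demo_op_func,
                                   &state);
        if(status == H5_ITER_ERROR && state.error_thrown)
            throw std::runtime_error(state.error_msg);
    }

    int main()
    {
        // create a scratch file so the traversal has something to walk;
        // a file id is a valid location for iterating the root group
        hid_t file_id = H5Fcreate("demo_traverse.h5", H5F_ACC_TRUNC,
                                  H5P_DEFAULT, H5P_DEFAULT);
        demo_traverse(file_id); // iterates zero links, throws nothing
        H5Fclose(file_id);
        return 0;
    }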