From 516167d31bd49ca3f291a66f01c132f5d427aae7 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 16 Oct 2023 05:52:23 -0700 Subject: [PATCH 001/101] Remove CANBE_UNUSED() from subfiling VFD (#3678) This macro was an attempt to quiet warnings about release mode unused variables that only appear in asserts. It resolves to a void cast, which doesn't quiet warnings when an assignment has already taken place. --- src/H5FDsubfiling/H5FDioc.c | 9 ++++----- src/H5FDsubfiling/H5FDsubfile_int.c | 3 ++- src/H5FDsubfiling/H5FDsubfiling_priv.h | 2 -- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/H5FDsubfiling/H5FDioc.c b/src/H5FDsubfiling/H5FDioc.c index 39766de6594..80771c02a53 100644 --- a/src/H5FDsubfiling/H5FDioc.c +++ b/src/H5FDsubfiling/H5FDioc.c @@ -32,8 +32,6 @@ #include "H5MMprivate.h" /* Memory management */ #include "H5Pprivate.h" /* Property lists */ -#define CANBE_UNUSED(X) (void)(X) - /* The driver identification number, initialized at runtime */ static hid_t H5FD_IOC_g = H5I_INVALID_HID; @@ -1219,13 +1217,14 @@ static herr_t H5FD__ioc_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNUSED dxpl_id, haddr_t addr, size_t size, void *buf) { - H5FD_ioc_t *file = (H5FD_ioc_t *)_file; - herr_t ret_value = SUCCEED; +#ifndef NDEBUG + H5FD_ioc_t *file = (H5FD_ioc_t *)_file; +#endif + herr_t ret_value = SUCCEED; H5FD_IOC_LOG_CALL(__func__); assert(file && file->pub.cls); - CANBE_UNUSED(file); assert(buf); /* Check for overflow conditions */ diff --git a/src/H5FDsubfiling/H5FDsubfile_int.c b/src/H5FDsubfiling/H5FDsubfile_int.c index cb210b6e4a6..a8500326dc7 100644 --- a/src/H5FDsubfiling/H5FDsubfile_int.c +++ b/src/H5FDsubfiling/H5FDsubfile_int.c @@ -343,11 +343,12 @@ H5FD__subfiling__get_real_eof(hid_t context_id, int64_t *logical_eof_ptr) H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Waitall", mpi_code); for (int i = 0; i < num_subfiles; i++) { +#ifndef NDEBUG int ioc_rank = (int)recv_msg[3 * i]; +#endif assert(ioc_rank >= 0); assert(ioc_rank < n_io_concentrators); - CANBE_UNUSED(ioc_rank); assert(sf_eofs[i] == -1); sf_eofs[i] = recv_msg[(3 * i) + 1]; diff --git a/src/H5FDsubfiling/H5FDsubfiling_priv.h b/src/H5FDsubfiling/H5FDsubfiling_priv.h index 9cc32f14abf..08fef7d1a01 100644 --- a/src/H5FDsubfiling/H5FDsubfiling_priv.h +++ b/src/H5FDsubfiling/H5FDsubfiling_priv.h @@ -63,6 +63,4 @@ H5_DLL herr_t H5FD__subfiling__get_real_eof(hid_t context_id, int64_t *logical_e } #endif -#define CANBE_UNUSED(X) (void)(X) - #endif /* H5FDsubfiling_priv_H */ From 1cbda391c4c709f60707a4ccfac2ebea1ce856e5 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 16 Oct 2023 05:53:37 -0700 Subject: [PATCH 002/101] Suppress MPI_Waitall warnings w/ MPICH (#3680) MPICH defines MPI_STATUSES_IGNORE (a pointer) to 1, which raises warnings w/ gcc. This is a known issue that the MPICH devs are not going to fix. 
See here: https://github.com/pmodels/mpich/issues/5687 This fix suppresses those issues w/ gcc --- src/H5FDsubfiling/H5FDioc_int.c | 6 ++++++ src/H5FDsubfiling/H5FDsubfile_int.c | 22 +++++++++++++--------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/H5FDsubfiling/H5FDioc_int.c b/src/H5FDsubfiling/H5FDioc_int.c index 674b08ddf94..5528fc8f8ff 100644 --- a/src/H5FDsubfiling/H5FDioc_int.c +++ b/src/H5FDsubfiling/H5FDioc_int.c @@ -423,8 +423,14 @@ ioc__async_completion(MPI_Request *mpi_reqs, size_t num_reqs) assert(mpi_reqs); H5_CHECK_OVERFLOW(num_reqs, size_t, int); + + /* Have to supppress gcc warnings regarding MPI_STATUSES_IGNORE + * with MPICH (https://github.com/pmodels/mpich/issues/5687) + */ + H5_GCC_DIAG_OFF("stringop-overflow") if (MPI_SUCCESS != (mpi_code = MPI_Waitall((int)num_reqs, mpi_reqs, MPI_STATUSES_IGNORE))) H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Waitall failed", mpi_code); + H5_GCC_DIAG_ON("stringop-overflow") done: H5_SUBFILING_FUNC_LEAVE; diff --git a/src/H5FDsubfiling/H5FDsubfile_int.c b/src/H5FDsubfiling/H5FDsubfile_int.c index a8500326dc7..a7dd86455cb 100644 --- a/src/H5FDsubfiling/H5FDsubfile_int.c +++ b/src/H5FDsubfiling/H5FDsubfile_int.c @@ -143,17 +143,19 @@ H5FD__subfiling__truncate_sub_files(hid_t context_id, int64_t logical_file_eof, } /* Wait for truncate operations to complete */ + H5_GCC_DIAG_OFF("stringop-overflow") if (MPI_SUCCESS != (mpi_code = MPI_Waitall(num_subfiles_owned, recv_reqs, MPI_STATUSES_IGNORE))) H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Waitall", mpi_code); - - /* sanity check -- compute the file eof using the same mechanism used to - * compute the subfile eof. Assert that the computed value and the - * actual value match. - * - * Do this only for debug builds -- probably delete this before release. - * - * JRM -- 12/15/21 - */ + H5_GCC_DIAG_ON("stringop-overflow") + + /* sanity check -- compute the file eof using the same mechanism used to + * compute the subfile eof. Assert that the computed value and the + * actual value match. + * + * Do this only for debug builds -- probably delete this before release. + * + * JRM -- 12/15/21 + */ #ifndef NDEBUG { @@ -339,8 +341,10 @@ H5FD__subfiling__get_real_eof(hid_t context_id, int64_t *logical_eof_ptr) } /* Wait for EOF communication to complete */ + H5_GCC_DIAG_OFF("stringop-overflow") if (MPI_SUCCESS != (mpi_code = MPI_Waitall(num_subfiles, recv_reqs, MPI_STATUSES_IGNORE))) H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Waitall", mpi_code); + H5_GCC_DIAG_ON("stringop-overflow") for (int i = 0; i < num_subfiles; i++) { #ifndef NDEBUG From e158217012646896a9da73d2884dce34c70d2996 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 16 Oct 2023 06:09:48 -0700 Subject: [PATCH 003/101] Fix a possible NULL pointer dereference in tests (#3676) The dtypes test could dereference a NULL pointer if a strdup call failed. 
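As an illustration only (plain C, not the actual dtypes test code, which
uses the FAIL_PUTS_ERROR machinery), the pattern the fix follows is to
check the strdup() result before the duplicated string is written to:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
        char *str = strdup("C string A");

        /* Bail out on allocation failure instead of dereferencing NULL */
        if (NULL == str) {
            fprintf(stderr, "Unable to duplicate string\n");
            return EXIT_FAILURE;
        }

        str[9] = 'B'; /* Safe: str is known to be non-NULL here */

        free(str);

        return EXIT_SUCCESS;
    }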
--- test/dtypes.c | 218 +++++++++++++++++++++----------------------------- 1 file changed, 92 insertions(+), 126 deletions(-) diff --git a/test/dtypes.c b/test/dtypes.c index 74b6f617f1c..a8def070d0e 100644 --- a/test/dtypes.c +++ b/test/dtypes.c @@ -1983,26 +1983,36 @@ test_compound_10(void) cmpd_struct wdata[ARRAY_DIM]; cmpd_struct rdata[ARRAY_DIM]; - hid_t file; - hid_t arr_tid, cmpd_tid, cstr_id, vlstr_id; - hid_t space_id; - hid_t dset_id; + hid_t file = H5I_INVALID_HID; + hid_t arr_tid = H5I_INVALID_HID; + hid_t cmpd_tid = H5I_INVALID_HID; + hid_t cstr_id = H5I_INVALID_HID; + hid_t vlstr_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; hsize_t arr_dim[1] = {ARRAY_DIM}; /* Array dimensions */ hsize_t dim1[1]; - void *t1, *t2; + void *t1 = NULL; + void *t2 = NULL; char filename[1024]; size_t len; int i; TESTING("array datatype of compound type with VL string"); + memset(wdata, 0, sizeof(wdata)); + memset(rdata, 0, sizeof(rdata)); + + /* Initialize */ for (i = 0; i < ARRAY_DIM; i++) { - wdata[i].i1 = i * 10 + i; - wdata[i].str = strdup("C string A"); + wdata[i].i1 = i * 10 + i; + if (NULL == (wdata[i].str = strdup("C string A"))) + FAIL_PUTS_ERROR("Unable to duplicate string"); wdata[i].str[9] = (char)(wdata[i].str[9] + i); wdata[i].i2 = i * 1000 + i * 10; - wdata[i].text.p = (void *)strdup("variable-length text A\0"); + if (NULL == (wdata[i].text.p = (void *)strdup("variable-length text A\0"))) + FAIL_PUTS_ERROR("Unable to duplicate string"); len = wdata[i].text.len = strlen((char *)wdata[i].text.p) + 1; ((char *)(wdata[i].text.p))[len - 2] = (char)(((char *)(wdata[i].text.p))[len - 2] + i); ((char *)(wdata[i].text.p))[len - 1] = '\0'; @@ -2010,160 +2020,116 @@ test_compound_10(void) /* Create File */ h5_fixname(FILENAME[4], H5P_DEFAULT, filename, sizeof filename); - if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) { - H5_FAILED(); - AT(); - printf("Can't create file!\n"); - goto error; - } /* end if */ + if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; /* Create first compound datatype */ - if ((cmpd_tid = H5Tcreate(H5T_COMPOUND, sizeof(struct cmpd_struct))) < 0) { - H5_FAILED(); - AT(); - printf("Can't create datatype!\n"); - goto error; - } /* end if */ + if ((cmpd_tid = H5Tcreate(H5T_COMPOUND, sizeof(struct cmpd_struct))) < 0) + TEST_ERROR; - if (H5Tinsert(cmpd_tid, "i1", HOFFSET(struct cmpd_struct, i1), H5T_NATIVE_INT) < 0) { - H5_FAILED(); - AT(); - printf("Can't insert field 'i1'\n"); - goto error; - } /* end if */ + if (H5Tinsert(cmpd_tid, "i1", HOFFSET(struct cmpd_struct, i1), H5T_NATIVE_INT) < 0) + TEST_ERROR; - cstr_id = H5Tcopy(H5T_C_S1); - if (H5Tset_size(cstr_id, H5T_VARIABLE) < 0) { - H5_FAILED(); - AT(); - printf("Can't set size for C string\n"); - goto error; - } /* end if */ + if ((cstr_id = H5Tcopy(H5T_C_S1)) < 0) + TEST_ERROR; + if (H5Tset_size(cstr_id, H5T_VARIABLE) < 0) + TEST_ERROR; - if (H5Tinsert(cmpd_tid, "c_string", HOFFSET(cmpd_struct, str), cstr_id) < 0) { - H5_FAILED(); - AT(); - printf("Can't insert field 'str'\n"); - goto error; - } /* end if */ + if (H5Tinsert(cmpd_tid, "c_string", HOFFSET(cmpd_struct, str), cstr_id) < 0) + TEST_ERROR; /* Create vl-string datatype */ - if ((vlstr_id = H5Tvlen_create(H5T_NATIVE_CHAR)) < 0) { - H5_FAILED(); - AT(); - printf("Can't create VL string\n"); - goto error; - } /* end if */ + if ((vlstr_id = H5Tvlen_create(H5T_NATIVE_CHAR)) < 0) + TEST_ERROR; - if (H5Tinsert(cmpd_tid, "vl_string", 
HOFFSET(cmpd_struct, text), vlstr_id) < 0) { - H5_FAILED(); - AT(); - printf("Can't insert field 'text'\n"); - goto error; - } /* end if */ + if (H5Tinsert(cmpd_tid, "vl_string", HOFFSET(cmpd_struct, text), vlstr_id) < 0) + TEST_ERROR; - if (H5Tinsert(cmpd_tid, "i2", HOFFSET(struct cmpd_struct, i2), H5T_NATIVE_INT) < 0) { - H5_FAILED(); - AT(); - printf("Can't insert field 'i2'\n"); - goto error; - } /* end if */ + if (H5Tinsert(cmpd_tid, "i2", HOFFSET(struct cmpd_struct, i2), H5T_NATIVE_INT) < 0) + TEST_ERROR; /* Create the array datatype for c_string data */ - if ((arr_tid = H5Tarray_create2(cmpd_tid, 1, arr_dim)) < 0) { - H5_FAILED(); - AT(); - printf("Can't create array type\n"); - goto error; - } /* end if */ + if ((arr_tid = H5Tarray_create2(cmpd_tid, 1, arr_dim)) < 0) + TEST_ERROR; dim1[0] = 1; - if ((space_id = H5Screate_simple(1, dim1, NULL)) < 0) { - H5_FAILED(); - AT(); - printf("Can't create space\n"); - goto error; - } /* end if */ + if ((space_id = H5Screate_simple(1, dim1, NULL)) < 0) + TEST_ERROR; - if ((dset_id = H5Dcreate2(file, "Dataset", arr_tid, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < - 0) { - H5_FAILED(); - AT(); - printf("Can't create dataset\n"); - goto error; - } /* end if */ + if ((dset_id = H5Dcreate2(file, "Dataset", arr_tid, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; - if (H5Dwrite(dset_id, arr_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0) { - H5_FAILED(); - AT(); - printf("Can't write data\n"); - goto error; - } /* end if */ + if (H5Dwrite(dset_id, arr_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0) + TEST_ERROR; - if (H5Dread(dset_id, arr_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) { - H5_FAILED(); - AT(); - printf("Can't read data\n"); - goto error; - } /* end if */ + if (H5Dread(dset_id, arr_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + TEST_ERROR; for (i = 0; i < ARRAY_DIM; i++) { if (rdata[i].i1 != wdata[i].i1 || rdata[i].i2 != wdata[i].i2 || - strcmp(rdata[i].str, wdata[i].str) != 0) { - H5_FAILED(); - AT(); - printf("incorrect read data\n"); - goto error; - } /* end if */ + strcmp(rdata[i].str, wdata[i].str) != 0) + FAIL_PUTS_ERROR("incorrect read data\n"); - if (rdata[i].text.len != wdata[i].text.len) { - H5_FAILED(); - AT(); - printf("incorrect VL length\n"); - goto error; - } /* end if */ + if (rdata[i].text.len != wdata[i].text.len) + FAIL_PUTS_ERROR("incorrect VL length\n"); t1 = rdata[i].text.p; t2 = wdata[i].text.p; - if (strcmp((char *)t1, (char *)t2) != 0) { - H5_FAILED(); - AT(); - printf("incorrect VL read data\n"); - goto error; - } - } /* end for */ - if (H5Treclaim(arr_tid, space_id, H5P_DEFAULT, &rdata) < 0) { - H5_FAILED(); - AT(); - printf("Can't reclaim read data\n"); - goto error; - } /* end if */ - if (H5Treclaim(arr_tid, space_id, H5P_DEFAULT, &wdata) < 0) { - H5_FAILED(); - AT(); - printf("Can't reclaim read data\n"); - goto error; - } /* end if */ + if (strcmp((char *)t1, (char *)t2) != 0) + FAIL_PUTS_ERROR("incorrect VL read data\n"); + } + + if (H5Treclaim(arr_tid, space_id, H5P_DEFAULT, &rdata) < 0) + TEST_ERROR; + if (H5Treclaim(arr_tid, space_id, H5P_DEFAULT, &wdata) < 0) + TEST_ERROR; if (H5Dclose(dset_id) < 0) - goto error; + TEST_ERROR; if (H5Tclose(arr_tid) < 0) - goto error; + TEST_ERROR; + arr_tid = H5I_INVALID_HID; if (H5Tclose(cmpd_tid) < 0) - goto error; + TEST_ERROR; if (H5Tclose(cstr_id) < 0) - goto error; + TEST_ERROR; if (H5Tclose(vlstr_id) < 0) - goto error; + TEST_ERROR; if (H5Sclose(space_id) < 0) - goto error; + TEST_ERROR; + space_id = H5I_INVALID_HID; if 
(H5Fclose(file) < 0) - goto error; + TEST_ERROR; PASSED(); return 0; error: + + H5E_BEGIN_TRY + { + if (arr_tid != H5I_INVALID_HID && space_id != H5I_INVALID_HID) { + H5Treclaim(arr_tid, space_id, H5P_DEFAULT, &rdata); + H5Treclaim(arr_tid, space_id, H5P_DEFAULT, &wdata); + } + else { + /* Clean up memory if we failed out early */ + for (i = 0; i < ARRAY_DIM; i++) { + free(wdata[i].str); + free(wdata[i].text.p); + } + } + + H5Dclose(dset_id); + H5Tclose(arr_tid); + H5Tclose(cmpd_tid); + H5Tclose(cstr_id); + H5Tclose(vlstr_id); + H5Sclose(space_id); + H5Fclose(file); + } + H5E_END_TRY + return 1; } From ccb1a917b6e68bc51a6a4e3d6783edfa056b01bf Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:19:15 -0700 Subject: [PATCH 004/101] Fix printf warnings in t_mpi (#3679) * Fix printf warnings in t_mpi The type of MPI_Offset varies with implementation. In MPICH, it's long, which raises warnings when we attempt to use long long format specifiers. Casting to long long fixes the warnings. --- testpar/t_mpi.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index 70a76ecdb32..eff39d057e0 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -306,12 +306,14 @@ test_mpio_gb_file(char *filename) for (i = ntimes - 2; i <= ntimes; i++) { mpi_off = (i * mpi_size + mpi_rank) * (MPI_Offset)MB; if (VERBOSE_MED) - fprintf(stdout, "proc %d: write to mpi_off=%016llx, %lld\n", mpi_rank, mpi_off, mpi_off); + fprintf(stdout, "proc %d: write to mpi_off=%016llx, %lld\n", mpi_rank, (long long)mpi_off, + (long long)mpi_off); /* set data to some trivial pattern for easy verification */ for (j = 0; j < MB; j++) *(buf + j) = (int8_t)(i * mpi_size + mpi_rank); if (VERBOSE_MED) - fprintf(stdout, "proc %d: writing %d bytes at offset %lld\n", mpi_rank, MB, mpi_off); + fprintf(stdout, "proc %d: writing %d bytes at offset %lld\n", mpi_rank, MB, + (long long)mpi_off); mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat); INFO((mrc == MPI_SUCCESS), "GB size file write"); if (mrc != MPI_SUCCESS) @@ -345,7 +347,8 @@ test_mpio_gb_file(char *filename) for (i = ntimes - 2; i <= ntimes; i++) { mpi_off = (i * mpi_size + (mpi_size - mpi_rank - 1)) * (MPI_Offset)MB; if (VERBOSE_MED) - fprintf(stdout, "proc %d: read from mpi_off=%016llx, %lld\n", mpi_rank, mpi_off, mpi_off); + fprintf(stdout, "proc %d: read from mpi_off=%016llx, %lld\n", mpi_rank, + (long long)mpi_off, (long long)mpi_off); mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat); INFO((mrc == MPI_SUCCESS), "GB size file read"); expected = (int8_t)(i * mpi_size + (mpi_size - mpi_rank - 1)); From 85507981000e5d68788a3ff00f66adb1b0aca842 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:20:18 -0700 Subject: [PATCH 005/101] Fix invalid memory access in S3 comms (#3681) In the ros3 VFD, passing an empty string parameter to an internal API call could result in accessing the -1th element of a string. This would cause failures on big-endian systems like s390x. This parameter is now checked before writing to the string. 
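A minimal sketch of the failure mode and the guard (illustrative only;
the function and variable names below are placeholders, not the ros3
internals):

    #include <stdio.h>
    #include <string.h>

    static void
    trim_trailing_semicolon(char *dest)
    {
        /* For an empty string, strlen(dest) is 0, so the unguarded
         * version of this assignment would write to dest[-1], one byte
         * before the buffer. Checking for the empty string first avoids
         * the out-of-bounds write.
         */
        if (*dest != '\0')
            dest[strlen(dest) - 1] = '\0';
    }

    int
    main(void)
    {
        char headers[] = "host;range;";
        char empty[]   = "";

        trim_trailing_semicolon(headers); /* -> "host;range" */
        trim_trailing_semicolon(empty);   /* No write, no underflow */

        printf("%s\n", headers);

        return 0;
    }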
Fixes GitHub #1168 --- src/H5FDs3comms.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/H5FDs3comms.c b/src/H5FDs3comms.c index 58fc4355e63..dae1149ae60 100644 --- a/src/H5FDs3comms.c +++ b/src/H5FDs3comms.c @@ -1713,7 +1713,8 @@ H5FD_s3comms_aws_canonical_request(char *canonical_request_dest, int _cr_size, c } /* end while node is not NULL */ /* remove trailing ';' from signed headers sequence */ - signed_headers_dest[strlen(signed_headers_dest) - 1] = '\0'; + if (*signed_headers_dest != '\0') + signed_headers_dest[strlen(signed_headers_dest) - 1] = '\0'; /* append signed headers and payload hash * NOTE: at present, no HTTP body is handled, per the nature of From 1b62827204d4fd924794c2bcb6456de70f6048b0 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:21:01 -0700 Subject: [PATCH 006/101] Add Doxygen for H5Pset_fapl_sec2() (#3685) * --- src/H5FDsec2.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/H5FDsec2.h b/src/H5FDsec2.h index 56f45751d32..a2590aee968 100644 --- a/src/H5FDsec2.h +++ b/src/H5FDsec2.h @@ -23,7 +23,23 @@ extern "C" { #endif -H5_DLL hid_t H5FD_sec2_init(void); +H5_DLL hid_t H5FD_sec2_init(void); + +/** + * \ingroup FAPL + * + * \brief Modifies the file access property list to use the #H5FD_SEC2 driver + * + * \fapl_id + * + * \returns \herr_t + * + * \details H5Pset_fapl_sec2() modifies the file access property list to use the + * #H5FD_SEC2 driver. + * + * \since 1.4.0 + * + */ H5_DLL herr_t H5Pset_fapl_sec2(hid_t fapl_id); #ifdef __cplusplus From 74fabd144e4355cfebc8d3e78b96c3e9c4ee7705 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Tue, 17 Oct 2023 11:12:16 -0500 Subject: [PATCH 007/101] switch to using time function instead of date function (#3690) --- utils/subfiling_vfd/h5fuse.sh.in | 206 ++++++++++++++++--------------- 1 file changed, 105 insertions(+), 101 deletions(-) diff --git a/utils/subfiling_vfd/h5fuse.sh.in b/utils/subfiling_vfd/h5fuse.sh.in index c6d715fd0a9..82d497e3f1c 100755 --- a/utils/subfiling_vfd/h5fuse.sh.in +++ b/utils/subfiling_vfd/h5fuse.sh.in @@ -13,7 +13,6 @@ BLD='\033[1m' GRN='\033[0;32m' RED='\033[0;31m' -PUR='\033[0;35m' CYN='\033[0;36m' NC='\033[0m' # No Color @@ -58,6 +57,106 @@ EOL } +function fuse { + +# function for fusing the files + +mpi_rank=0 +mpi_size=1 +nstart=1 +nend=$nsubfiles + +if [ "$parallel" == "true" ]; then + + hex=$(hexdump -n 16 -v -e '/1 "%02X"' /dev/urandom) + c_exec="h5fuse_"${hex} + c_src=${c_exec}.c + + # Generate and compile an MPI program to get MPI rank and size + if [ ! -f "${c_src}" ]; then + gen_mpi + CC=@CC@ + ${CC} "${c_src}" -o "${c_exec}" + fi + wait + rank_size=$(./"${c_exec}") + read -r mpi_rank mpi_size <<<"$rank_size" + + rm -f "${c_src}" "${c_exec}" + + # Divide the subfiles among the ranks + iwork1=$(( nsubfiles / mpi_size )) + iwork2=$(( nsubfiles % mpi_size )) + min=$(( mpi_rank < iwork2 ? 
mpi_rank : iwork2 )) + nstart=$(( mpi_rank * iwork1 + 1 + min )) + nend=$(( nstart + iwork1 - 1 )) + if [ $iwork2 -gt "$mpi_rank" ]; then + nend=$(( nend + 1 )) + fi +fi + +############################################################ +# COMBINE SUBFILES INTO AN HDF5 FILE # +############################################################ +icnt=1 +skip=0 +seek=0 +seek_cnt=0 +for i in "${subfiles[@]}"; do + + subfile="${subfile_dir}/${i}" + + # bs=BYTES read and write up to BYTES bytes at a time; overrides ibs and obs + # ibs=BYTES read up to BYTES bytes at a time + # obs=BYTES write BYTES bytes at a time + # seek=N skip N obs-sized blocks at start of output + # skip=N skip N ibs-sized blocks at start of input + + status=1 + fsize=${subfiles_size[icnt-1]} + if [ "$fsize" -eq "0" ]; then + seek_cnt=$((seek_cnt+1)) + seek=$seek_cnt + if [ "$rm_subf" == "true" ]; then + if [ -f "${subfile}" ]; then + \rm -f "$subfile" + fi + fi + else + if [ $icnt -ge "$nstart" ] && [ $icnt -le "$nend" ]; then + records_left=$fsize + while [ "$status" -gt 0 ]; do + if [ $((skip*stripe_size)) -le "$fsize" ] && [ "$records_left" -gt 0 ]; then + EXEC="dd count=1 bs=$stripe_size if=$subfile of=$hdf5_file skip=$skip seek=$seek conv=notrunc" + if [ "$verbose" == "true" ]; then + echo -e "$GRN $EXEC $NC" + fi + err=$( $EXEC 2>&1 1>/dev/null ) + if [ $? -ne 0 ]; then + echo -e "$CYN ERR: dd Utility Failed $NC" + echo -e "$CYN MSG: $err $NC" + exit $FAILED + fi + records_left=$((records_left-stripe_size)) + skip=$((skip+1)) + seek=$((seek_cnt+skip*nsubfiles)) + else + status=0 + skip=0 + fi + done; wait + if [ "$rm_subf" == "true" ]; then + \rm -f "$subfile" + fi + fi + seek_cnt=$((seek_cnt+1)) + seek=$seek_cnt + fi + icnt=$(( icnt +1 )) +done; wait + +} + ############################################################ ############################################################ # Main program # @@ -166,104 +265,9 @@ for i in "${subfiles[@]}"; do fi done -START="$(date +%s%N)" - -mpi_rank=0 -mpi_size=1 -nstart=1 -nend=$nsubfiles - -if [ "$parallel" == "true" ]; then - - hex=$(hexdump -n 16 -v -e '/1 "%02X"' /dev/urandom) - c_exec="h5fuse_"${hex} - c_src=${c_exec}.c - - # Generate and compile an MPI program to get MPI rank and size - if [ ! -f "${c_src}" ]; then - gen_mpi - CC=@CC@ - ${CC} "${c_src}" -o "${c_exec}" - fi - wait - rank_size=$(./"${c_exec}") - read -r mpi_rank mpi_size <<<"$rank_size" - - rm -f "${c_src}" "${c_exec}" - - # Divide the subfiles among the ranks - iwork1=$(( nsubfiles / mpi_size )) - iwork2=$(( nsubfiles % mpi_size )) - min=$(( mpi_rank < iwork2 ? 
mpi_rank : iwork2 )) - nstart=$(( mpi_rank * iwork1 + 1 + min )) - nend=$(( nstart + iwork1 - 1 )) - if [ $iwork2 -gt "$mpi_rank" ]; then - nend=$(( nend + 1 )) - fi -fi - -############################################################ -# COMBINE SUBFILES INTO AN HDF5 FILE # -############################################################ -icnt=1 -skip=0 -seek=0 -seek_cnt=0 -for i in "${subfiles[@]}"; do - - subfile="${subfile_dir}/${i}" - - # bs=BYTES read and write up to BYTES bytes at a time; overrides ibs and obs - # ibs=BYTES read up to BYTES bytes at a time - # obs=BYTES write BYTES bytes at a time - # seek=N skip N obs-sized blocks at start of output - # skip=N skip N ibs-sized blocks at start of input - - status=1 - fsize=${subfiles_size[icnt-1]} - if [ "$fsize" -eq "0" ]; then - seek_cnt=$((seek_cnt+1)) - seek=$seek_cnt - if [ "$rm_subf" == "true" ]; then - if [ -f "${subfile}" ]; then - \rm -f "$subfile" - fi - fi - else - if [ $icnt -ge "$nstart" ] && [ $icnt -le "$nend" ]; then - records_left=$fsize - while [ "$status" -gt 0 ]; do - if [ $((skip*stripe_size)) -le "$fsize" ] && [ "$records_left" -gt 0 ]; then - EXEC="dd count=1 bs=$stripe_size if=$subfile of=$hdf5_file skip=$skip seek=$seek conv=notrunc" - if [ "$verbose" == "true" ]; then - echo -e "$GRN $EXEC $NC" - fi - err=$( $EXEC 2>&1 1>/dev/null ) - if [ $? -ne 0 ]; then - echo -e "$CYN ERR: dd Utility Failed $NC" - echo -e "$CYN MSG: $err $NC" - exit $FAILED - fi - records_left=$((records_left-stripe_size)) - skip=$((skip+1)) - seek=$((seek_cnt+skip*nsubfiles)) - else - status=0 - skip=0 - fi - done; wait - if [ "$rm_subf" == "true" ]; then - \rm -f "$subfile" - fi - fi - seek_cnt=$((seek_cnt+1)) - seek=$seek_cnt - fi - icnt=$(( icnt +1 )) -done; wait - -END=$(( $(date +%s%N) - START )) -DURATION_SEC=$(awk -vp="$END" -vq=0.000000001 'BEGIN{printf "%.4f" ,p * q}') if [ "$quiet" == "false" ]; then - echo -e "$PUR COMPLETION TIME = $DURATION_SEC s $NC" -fi \ No newline at end of file + TIMEFORMAT="COMPLETION TIME = %R s" + time fuse +else + fuse +fi From a6bc1186c35ad0b4ee2df45144124495243a82d6 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 17 Oct 2023 11:16:48 -0500 Subject: [PATCH 008/101] Initialize API context MPI types to MPI_BYTE (#3688) --- src/H5CX.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/H5CX.c b/src/H5CX.c index aa9d0b5aece..b3b2fca41b9 100644 --- a/src/H5CX.c +++ b/src/H5CX.c @@ -778,6 +778,11 @@ H5CX__push_common(H5CX_node_t *cnode) cnode->ctx.tag = H5AC__INVALID_TAG; cnode->ctx.ring = H5AC_RING_USER; +#ifdef H5_HAVE_PARALLEL + cnode->ctx.btype = MPI_BYTE; + cnode->ctx.ftype = MPI_BYTE; +#endif + /* Push context node onto stack */ cnode->next = *head; *head = cnode; From 45c4729799ba65fbca80fa5e600264e3ed0aa999 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 17 Oct 2023 12:40:42 -0500 Subject: [PATCH 009/101] Add test info output to t_filters_parallel (#3696) --- testpar/t_filters_parallel.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 0f08be9344d..bdfde0972b5 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -9707,6 +9707,7 @@ int main(int argc, char **argv) { unsigned seed; + double total_test_time = 0.0; size_t cur_filter_idx = 0; size_t num_filters = 0; hid_t file_id = H5I_INVALID_HID; @@ -9798,8 +9799,13 @@ main(int argc, char **argv) srand(seed); - if (MAINPROCESS) - printf("Using seed: %u\n\n", seed); + /* Print test settings */ + 
if (MAINPROCESS) { + printf("Test Info:\n"); + printf(" MPI size: %d\n", mpi_size); + printf(" Test express level: %d\n", test_express_level_g); + printf(" Using seed: %u\n\n", seed); + } num_filters = ARRAY_SIZE(filterIDs); @@ -9884,6 +9890,8 @@ main(int argc, char **argv) const char *alloc_time; const char *mode; unsigned filter_config; + double start_time = 0.0; + double end_time = 0.0; char group_name[512]; switch (sel_io_mode) { @@ -9948,7 +9956,7 @@ main(int argc, char **argv) continue; } - if (MAINPROCESS) + if (MAINPROCESS) { printf("== Running tests in mode '%s' with filter '%s' using selection I/O mode " "'%s', '%s' and '%s' allocation time ==\n\n", test_mode_to_string(test_mode), filterNames[cur_filter_idx], sel_io_str, @@ -9956,6 +9964,9 @@ main(int argc, char **argv) : "Multi-Chunk I/O", alloc_time); + start_time = MPI_Wtime(); + } + /* Get the current filter's info */ VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded"); @@ -10018,6 +10029,12 @@ main(int argc, char **argv) if (MAINPROCESS) puts(""); + + if (MAINPROCESS) { + end_time = MPI_Wtime(); + total_test_time += end_time - start_time; + printf("Tests took %f seconds\n\n", end_time - start_time); + } } } } @@ -10041,7 +10058,7 @@ main(int argc, char **argv) goto exit; if (MAINPROCESS) - puts("All Parallel Filters tests passed\n"); + printf("All Parallel Filters tests passed - total test time was %f seconds\n", total_test_time); exit: if (nerrors) From e2d40ef16e014442fccea32bce2184fb2eb8e113 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:05:20 -0700 Subject: [PATCH 010/101] Suppress format string warnings in subfiling test (#3699) --- testpar/t_subfiling_vfd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index ccece41b846..7c565997b3d 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -382,16 +382,20 @@ test_config_file(void) substr = strstr(config_buf, "hdf5_file"); VRFY(substr, "strstr succeeded"); + H5_GCC_CLANG_DIAG_OFF("format-nonliteral") snprintf(scan_format, sizeof(scan_format), "hdf5_file=%%%zus", (size_t)(PATH_MAX - 1)); VRFY((sscanf(substr, scan_format, tmp_buf) == 1), "sscanf succeeded"); + H5_GCC_CLANG_DIAG_ON("format-nonliteral") VRFY((strcmp(tmp_buf, resolved_path) == 0), "strcmp succeeded"); substr = strstr(config_buf, "subfile_dir"); VRFY(substr, "strstr succeeded"); + H5_GCC_CLANG_DIAG_OFF("format-nonliteral") snprintf(scan_format, sizeof(scan_format), "subfile_dir=%%%zus", (size_t)(PATH_MAX - 1)); VRFY((sscanf(substr, scan_format, tmp_buf) == 1), "sscanf succeeded"); + H5_GCC_CLANG_DIAG_ON("format-nonliteral") VRFY((strcmp(tmp_buf, subfile_dir) == 0), "strcmp succeeded"); From 3523d4ba66a6256e74f3896a039d87f0398d68e7 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:14:24 -0700 Subject: [PATCH 011/101] Fix unused variable in tselect.c (#3701) --- test/tselect.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/tselect.c b/test/tselect.c index 55599b3324e..55430f24ad5 100644 --- a/test/tselect.c +++ b/test/tselect.c @@ -1875,8 +1875,8 @@ test_select_hyper_contig3(hid_t dset_type, hid_t xfer_plist) ** ****************************************************************/ static void -verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t H5_ATTR_NDEBUG_UNUSED cube_size, - unsigned edge_size, unsigned cube_rank) 
+verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t cube_size, unsigned edge_size, + unsigned cube_rank) { const uint16_t *cube_ptr; /* Pointer into the cube buffer */ uint16_t expected_value; /* Expected value in dataset */ @@ -1902,7 +1902,9 @@ verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t H5_ATTR m = 0; do { /* Sanity check */ - assert(s < cube_size); + if (s >= cube_size) + TestErrPrintf("s should not be >= cube_size! s = %zu, cube_size = %zu\n", s, + cube_size); /* Check for correct value */ if (*cube_ptr != expected_value) From 08d0909c9c2916ae7f3cc5f8918b7066a9847433 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:22:10 -0700 Subject: [PATCH 012/101] Fix unused variable warning in H5F_sfile_assert_num (#3700) --- src/H5Fsfile.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/H5Fsfile.c b/src/H5Fsfile.c index ef80a799ca1..6cf2c809b8c 100644 --- a/src/H5Fsfile.c +++ b/src/H5Fsfile.c @@ -47,14 +47,17 @@ static H5F_sfile_node_t *H5F_sfile_head_s = NULL; *------------------------------------------------------------------------- */ void -H5F_sfile_assert_num(unsigned n) +H5F_sfile_assert_num(unsigned H5_ATTR_NDEBUG_UNUSED n) { FUNC_ENTER_NOAPI_NOINIT_NOERR + /* The only useful work this function does is asserting so when NDEBUG + * is defined it's a no-op. + */ +#ifndef NDEBUG if (n == 0) { - /* Sanity checking */ assert(H5F_sfile_head_s == NULL); - } /* end if */ + } else { unsigned count; /* Number of open shared files */ H5F_sfile_node_t *curr; /* Current shared file node */ @@ -68,11 +71,11 @@ H5F_sfile_assert_num(unsigned n) /* Advance to next shared file node */ curr = curr->next; - } /* end while */ + } - /* Sanity checking */ assert(count == n); - } /* end else */ + } +#endif FUNC_LEAVE_NOAPI_VOID } /* H5F_sfile_assert_num() */ From 2a3b511e96fbc468ef2b934eb169048e723a2f34 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:22:27 -0700 Subject: [PATCH 013/101] Restore floating-point suffixes in tests (#3698) A prior commit removed too many F suffixes. This restores the suffixes for float variables. 
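For reference, a standalone example (not part of the test code) of why
the suffix matters when a float is initialized from a literal:

    #include <stdio.h>

    int
    main(void)
    {
        /* 8.0 is a double constant, so initializing a float from it is
         * an implicit double-to-float conversion that options such as
         * gcc's -Wfloat-conversion flag; 8.0F is a float constant, so
         * no narrowing conversion takes place.
         */
        float narrowed = 8.0;  /* may draw a conversion warning */
        float exact    = 8.0F; /* no conversion */

        printf("%f %f\n", (double)narrowed, (double)exact);

        return 0;
    }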
--- hl/test/test_table.c | 22 +++++++++++----------- test/dt_arith.c | 12 ++++++------ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/hl/test/test_table.c b/hl/test/test_table.c index c475e7fee95..c6614343037 100644 --- a/hl/test/test_table.c +++ b/hl/test/test_table.c @@ -198,7 +198,7 @@ test_table(hid_t fid, int do_write) hsize_t chunk_size = 10; int compress = 0; int *fill = NULL; - particle_t fill1[1] = {{"no data", -1, -99.0, -99.0, -1}}; + particle_t fill1[1] = {{"no data", -1, -99.0F, -99.0, -1}}; int fill1_new[1] = {-100}; hsize_t position; char tname[20]; @@ -226,23 +226,23 @@ test_table(hid_t fid, int do_write) particle2_t rbuf2[NRECORDS]; particle3_t rbuf3[NRECORDS]; particle_t rbufc[NRECORDS * 2]; - particle_t abuf[2] = {{"eight", 80, 8.0, 80.0, 80}, {"nine", 90, 9.0, 90.0, 90}}; - particle_t ibuf[2] = {{"zero", 0, 0.0, 0.0, 0}, {"zero", 0, 0.0, 0.0, 0}}; + particle_t abuf[2] = {{"eight", 80, 8.0F, 80.0, 80}, {"nine", 90, 9.0F, 90.0, 90}}; + particle_t ibuf[2] = {{"zero", 0, 0.0F, 0.0, 0}, {"zero", 0, 0.0F, 0.0, 0}}; particle_t wbufd[NRECORDS]; particle_t wbuf[NRECORDS] = {{ "zero", 0, - 0.0, + 0.0F, 0.0, 0, }, - {"one", 10, 1.0, 10.0, 10}, - {"two", 20, 2.0, 20.0, 20}, - {"three", 30, 3.0, 30.0, 30}, - {"four", 40, 4.0, 40.0, 40}, - {"five", 50, 5.0, 50.0, 50}, - {"six", 60, 6.0, 60.0, 60}, - {"seven", 70, 7.0, 70.0, 70}}; + {"one", 10, 1.0F, 10.0, 10}, + {"two", 20, 2.0F, 20.0, 20}, + {"three", 30, 3.0F, 30.0, 30}, + {"four", 40, 4.0F, 40.0, 40}, + {"five", 50, 5.0F, 50.0, 50}, + {"six", 60, 6.0F, 60.0, 60}, + {"seven", 70, 7.0F, 70.0, 70}}; /* buffers for the field "Pressure" and "New_field" */ float pressure_in[NRECORDS] = {0.0F, 1.0F, 2.0F, 3.0F, 4.0F, 5.0F, 6.0F, 7.0F}; float pressure_out[NRECORDS]; diff --git a/test/dt_arith.c b/test/dt_arith.c index ab89b689837..d3147cbd577 100644 --- a/test/dt_arith.c +++ b/test/dt_arith.c @@ -696,7 +696,7 @@ test_particular_fp_integer(void) /* Print errors */ if (dst_i != fill_value) { - float x = 0.0; + float x = 0.0F; int y; if (0 == fails_this_test++) @@ -2637,7 +2637,7 @@ my_isnan(dtype_t type, void *val) char s[256]; if (FLT_FLOAT == type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, val, sizeof(float)); retval = isnan(x); } @@ -2663,7 +2663,7 @@ my_isnan(dtype_t type, void *val) */ if (!retval) { if (FLT_FLOAT == type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, val, sizeof(float)); snprintf(s, sizeof(s), "%g", (double)x); @@ -3115,7 +3115,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) int check_expo[2]; if (FLT_FLOAT == dst_type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, &buf[j * dst_size], sizeof(float)); if (underflow && fabsf(x) <= FLT_MIN && fabsf(hw_f) <= FLT_MIN) continue; /* all underflowed, no error */ @@ -3185,7 +3185,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) printf(" %02x", saved[j * src_size + ENDIAN(src_size, k, sendian)]); printf("%*s", (int)(3 * MAX(0, (ssize_t)dst_size - (ssize_t)src_size)), ""); if (FLT_FLOAT == src_type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, &saved[j * src_size], sizeof(float)); printf(" %29.20e\n", (double)x); } @@ -3207,7 +3207,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) printf(" %02x", buf[j * dst_size + ENDIAN(dst_size, k, dendian)]); printf("%*s", (int)(3 * MAX(0, (ssize_t)src_size - (ssize_t)dst_size)), ""); if (FLT_FLOAT == dst_type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, &buf[j * dst_size], sizeof(float)); printf(" %29.20e\n", (double)x); } From 
fc788559f7747eeefd152fc8c7844f45d3845300 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 17 Oct 2023 20:32:31 -0500 Subject: [PATCH 014/101] Correct use of set() command with options (#3667) (#3703) * Correct use of set() command with options * Force filter off if not found Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> --- CMakeFilters.cmake | 18 ++++++++++-------- CMakePlugins.cmake | 7 +++---- config/cmake/HDF5PluginMacros.cmake | 4 ++-- release_docs/INSTALL_CMake.txt | 18 +++++++++--------- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake index 042bfdc356b..da8e2c74ff4 100644 --- a/CMakeFilters.cmake +++ b/CMakeFilters.cmake @@ -10,10 +10,9 @@ # help@hdfgroup.org. # option (USE_LIBAEC_STATIC "Use static AEC library " OFF) -option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0) -option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0) +option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" OFF) +option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" OFF) -set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) if (NOT ZLIB_USE_LOCALCONTENT) set (ZLIB_URL ${ZLIB_TGZ_ORIGPATH}/${ZLIB_TGZ_NAME}) else () @@ -23,7 +22,6 @@ if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") message (VERBOSE "Filter ZLIB file is ${ZLIB_URL}") endif () -set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) if (NOT LIBAEC_USE_LOCALCONTENT) set (SZIP_URL ${LIBAEC_TGZ_ORIGPATH}/${LIBAEC_TGZ_NAME}) else () @@ -38,8 +36,8 @@ include (ExternalProject) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) - set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) + set (ZLIB_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for ZLIB") + set (SZIP_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for SZIP") if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (ZLIB_URL ${ZLIB_GIT_URL} CACHE STRING "Path to zlib git repository") set (ZLIB_BRANCH ${ZLIB_GIT_BRANCH}) @@ -62,8 +60,10 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT endif () endif () else () - set (ZLIB_USE_EXTERNAL 0) - set (SZIP_USE_EXTERNAL 0) + set (HDF5_ENABLE_Z_LIB_SUPPORT OFF CACHE BOOL "" FORCE) + set (ZLIB_USE_EXTERNAL OFF CACHE BOOL "Use External Library Building for ZLIB") + set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) + set (SZIP_USE_EXTERNAL OFF CACHE BOOL "Use External Library Building for SZIP") endif () endif () @@ -107,6 +107,7 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) INCLUDE_DIRECTORIES (${ZLIB_INCLUDE_DIRS}) message (VERBOSE "Filter HDF5_ZLIB is ON") else () + set (HDF5_ENABLE_Z_LIB_SUPPORT OFF CACHE BOOL "" FORCE) message (WARNING " ZLib support in HDF5 was enabled but not found") endif () endif () @@ -157,6 +158,7 @@ if (HDF5_ENABLE_SZIP_SUPPORT) set (EXTERNAL_FILTERS "${EXTERNAL_FILTERS} ENCODE") endif () else () + set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) message (WARNING "SZIP support in HDF5 was enabled but not found") endif () endif () diff --git a/CMakePlugins.cmake b/CMakePlugins.cmake index b96d1ee0466..bc1074fbba3 100644 --- a/CMakePlugins.cmake +++ b/CMakePlugins.cmake @@ -9,9 +9,8 @@ # If you do not have access to 
either file, you may request a copy from # help@hdfgroup.org. # -option (PLUGIN_USE_EXTERNAL "Use External Library Building for filter PLUGIN" 0) +option (PLUGIN_USE_EXTERNAL "Use External Library Building for filter PLUGIN" OFF) -set (PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGIN" 1) if (NOT PLUGIN_USE_LOCALCONTENT) set (PLUGIN_URL ${PLUGIN_TGZ_ORIGPATH}/${PLUGIN_TGZ_NAME}) else () @@ -27,7 +26,7 @@ include (ExternalProject) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGIN" 1) + set (PLUGIN_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for PLUGIN") if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (PLUGIN_URL ${PLUGIN_GIT_URL} CACHE STRING "Path to PLUGIN git repository") set (PLUGIN_BRANCH ${PLUGIN_GIT_BRANCH}) @@ -42,7 +41,7 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT endif () endif () else () - set (PLUGIN_USE_EXTERNAL 0) + set (PLUGIN_USE_EXTERNAL OFF CACHE BOOL "Use External Library Building for PLUGIN") message (VERBOSE "Filter PLUGIN not built") endif () endif () diff --git a/config/cmake/HDF5PluginMacros.cmake b/config/cmake/HDF5PluginMacros.cmake index aa409f710a2..41f746fb002 100644 --- a/config/cmake/HDF5PluginMacros.cmake +++ b/config/cmake/HDF5PluginMacros.cmake @@ -94,10 +94,10 @@ macro (FILTER_OPTION plname) string(TOLOWER ${plname} PLUGIN_NAME) option (ENABLE_${plname} "Enable Library Building for ${plname} plugin" ON) if (ENABLE_${plname}) - option (HDF_${plname}_USE_EXTERNAL "Use External Library Building for ${PLUGIN_NAME} plugin" 0) + option (HDF_${plname}_USE_EXTERNAL "Use External Library Building for ${PLUGIN_NAME} plugin" OFF) mark_as_advanced (HDF_${plname}_USE_EXTERNAL) if (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (HDF_${plname}_USE_EXTERNAL 1 CACHE BOOL "Use External Library Building for ${PLUGIN_NAME} plugin" FORCE) + set (HDF_${plname}_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for ${PLUGIN_NAME} plugin" FORCE) if (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (HDF_${plname}_URL ${HDF_${plname}_GIT_URL}) set (HDF_${plname}_BRANCH ${HDF_${plname}_GIT_BRANCH}) diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 835892471c2..281c9a18f6c 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -885,19 +885,19 @@ HDF5_ENABLE_PLUGIN_SUPPORT "Enable PLUGIN Filters" HDF5_ENABLE_SZIP_SUPPORT "Use SZip Filter" ON HDF5_ENABLE_Z_LIB_SUPPORT "Enable Zlib Filters" ON -ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0 -ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" -ZLIB_TGZ_NAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" -ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON +ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" OFF +ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" +ZLIB_TGZ_NAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" +ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON -SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0 +SZIP_USE_EXTERNAL "Use External Library Building 
for SZIP" OFF if (HDF5_ENABLE_SZIP_SUPPORT) - HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" ON -LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" -LIBAEC_TGZ_NAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" + HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" ON +LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" +LIBAEC_TGZ_NAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" ON -PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS" 0 +PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS" OFF if (WINDOWS) H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%/hdf5/lib/plugin" else () From 95572568191492f0d79924a55ea10453095a20c1 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 17 Oct 2023 23:54:40 -0500 Subject: [PATCH 015/101] Sync changes that are only in 1.14 branch (#3704) --- CMakePresets.json | 3 ++- config/cmake-presets/hidden-presets.json | 3 ++- config/cmake/ConfigureChecks.cmake | 2 +- config/cmake/runTest.cmake | 16 ---------------- config/sanitizer/README.md | 3 ++- doxygen/examples/H5D_examples.c | 2 +- fortran/src/H5Fff.F90 | 1 + fortran/src/H5Lff.F90 | 3 +++ src/H5Olayout.c | 1 + tools/test/misc/talign.c | 2 +- 10 files changed, 14 insertions(+), 22 deletions(-) diff --git a/CMakePresets.json b/CMakePresets.json index 6d1a12fdff6..7b70970b17b 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -263,4 +263,5 @@ ] } ] -} \ No newline at end of file +} + diff --git a/config/cmake-presets/hidden-presets.json b/config/cmake-presets/hidden-presets.json index bd36153c278..fad63b7b387 100644 --- a/config/cmake-presets/hidden-presets.json +++ b/config/cmake-presets/hidden-presets.json @@ -488,4 +488,5 @@ ] } ] -} \ No newline at end of file +} + diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index f847457f5a0..3d4c23b362c 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -909,7 +909,7 @@ endmacro () #----------------------------------------------------------------------------- # ---------------------------------------------------------------------- -# Set the flag to indicate that the machine is using a special algorithm toconvert +# Set the flag to indicate that the machine is using a special algorithm to convert # 'long double' to '(unsigned) long' values. (This flag should only be set for # the IBM Power Linux. 
When the bit sequence of long double is # 0x4351ccf385ebc8a0bfcc2a3c3d855620, the converted value of (unsigned)long diff --git a/config/cmake/runTest.cmake b/config/cmake/runTest.cmake index 1304d36735b..d21765a8e36 100644 --- a/config/cmake/runTest.cmake +++ b/config/cmake/runTest.cmake @@ -218,14 +218,6 @@ if (NOT TEST_SKIP_COMPARE) file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM) list (LENGTH TEST_STREAM test_len) if (test_len GREATER 0) - # if (WIN32) # no longer needed for CMake > 3.15 - # configure_file(${TEST_FOLDER}/${TEST_REFERENCE} ${TEST_FOLDER}/${TEST_REFERENCE}.tmp NEWLINE_STYLE CRLF) - # if (EXISTS "${TEST_FOLDER}/${TEST_REFERENCE}.tmp") - # file(RENAME ${TEST_FOLDER}/${TEST_REFERENCE}.tmp ${TEST_FOLDER}/${TEST_REFERENCE}) - # endif () - # #file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM) - # #file (WRITE ${TEST_FOLDER}/${TEST_REFERENCE} "${TEST_STREAM}") - # endif () if (NOT TEST_SORT_COMPARE) # now compare the output with the reference @@ -293,14 +285,6 @@ if (NOT TEST_SKIP_COMPARE) file (READ ${TEST_FOLDER}/${TEST_ERRREF} TEST_STREAM) list (LENGTH TEST_STREAM test_len) if (test_len GREATER 0) - # if (WIN32) # no longer needed for CMake > 3.15 - # configure_file(${TEST_FOLDER}/${TEST_ERRREF} ${TEST_FOLDER}/${TEST_ERRREF}.tmp NEWLINE_STYLE CRLF) - # if (EXISTS "${TEST_FOLDER}/${TEST_ERRREF}.tmp") - # file(RENAME ${TEST_FOLDER}/${TEST_ERRREF}.tmp ${TEST_FOLDER}/${TEST_ERRREF}) - # endif () - # #file (READ ${TEST_FOLDER}/${TEST_ERRREF} TEST_STREAM) - # #file (WRITE ${TEST_FOLDER}/${TEST_ERRREF} "${TEST_STREAM}") - # endif () # now compare the error output with the error reference execute_process ( diff --git a/config/sanitizer/README.md b/config/sanitizer/README.md index 308f9c393aa..e3141455401 100644 --- a/config/sanitizer/README.md +++ b/config/sanitizer/README.md @@ -304,4 +304,5 @@ file(GLOB_RECURSE CMAKE_FILES ) cmake_format(TARGET_NAME ${CMAKE_FILES}) -``` \ No newline at end of file +``` + diff --git a/doxygen/examples/H5D_examples.c b/doxygen/examples/H5D_examples.c index ae483ee5566..1ad27947ee6 100644 --- a/doxygen/examples/H5D_examples.c +++ b/doxygen/examples/H5D_examples.c @@ -10,7 +10,7 @@ int chunk_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data) { // only print the allocated chunk size only - printf("%ld\n", size); + printf("%" PRIuHSIZE "\n", size); return EXIT_SUCCESS; } //! diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90 index cfae7652e63..fee4d3c7cf8 100644 --- a/fortran/src/H5Fff.F90 +++ b/fortran/src/H5Fff.F90 @@ -43,6 +43,7 @@ MODULE H5F ! 
Number of objects opened in H5open_f INTEGER(SIZE_T) :: H5OPEN_NUM_OBJ + #ifndef H5_DOXYGEN INTERFACE INTEGER(C_INT) FUNCTION h5fis_accessible(name, & diff --git a/fortran/src/H5Lff.F90 b/fortran/src/H5Lff.F90 index bedfb8c6acc..004e5b23fdd 100644 --- a/fortran/src/H5Lff.F90 +++ b/fortran/src/H5Lff.F90 @@ -753,6 +753,9 @@ END FUNCTION H5Lexists hdferr = 0 IF(link_exists_c.LT.0_C_INT) hdferr = -1 + hdferr = 0 + IF(link_exists_c.LT.0) hdferr = -1 + END SUBROUTINE h5lexists_f !> diff --git a/src/H5Olayout.c b/src/H5Olayout.c index a686ce49e1c..1f2b6862b6e 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -591,6 +591,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU /* Avoid zero-size allocation */ mesg->storage.u.virt.list = NULL; } + mesg->storage.u.virt.list_nalloc = (size_t)tmp_hsize; mesg->storage.u.virt.list_nused = (size_t)tmp_hsize; diff --git a/tools/test/misc/talign.c b/tools/test/misc/talign.c index 2387be4b670..7de9d1afe5c 100644 --- a/tools/test/misc/talign.c +++ b/tools/test/misc/talign.c @@ -179,7 +179,7 @@ main(void) " %6f = %f\n", (double)fok[0], (double)fptr[0], (double)fok[1], (double)fptr[1], (double)fnok[0], (double)fptr[2], (double)fnok[1], (double)fptr[3]); - puts("*FAILED - compound type alignmnent problem*"); + puts("*FAILED - compound type alignment problem*"); } else { puts(" PASSED"); From 5e310254e205fd3a2a11afdab9746c49c43d5c06 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Wed, 18 Oct 2023 09:59:01 -0500 Subject: [PATCH 016/101] Correct ld in format strings in cmpd_dset.c (#3697) Removes clang warnings --- test/cmpd_dset.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c index 02dbde3759a..460c8ae7564 100644 --- a/test/cmpd_dset.c +++ b/test/cmpd_dset.c @@ -401,8 +401,10 @@ compare_a_b_c_data(void *exp1_buf, void *exp2_buf, void *rbuf) if (s1_ptr->a != rbuf_ptr->a || s2_ptr->b != rbuf_ptr->b || s2_ptr->c != rbuf_ptr->c) { H5_FAILED(); printf(" i=%d\n", i); - printf(" expect_buf:a=%ld, b=%ld, c=%ld\n", s1_ptr->a, s2_ptr->b, s2_ptr->c); - printf(" rbuf: a=%ld, b=%ld, c=%ld", rbuf_ptr->a, rbuf_ptr->b, rbuf_ptr->c); + printf(" expect_buf:a=%" PRId64 ", b=%" PRId64 ", c=%" PRId64 "\n", s1_ptr->a, s2_ptr->b, + s2_ptr->c); + printf(" rbuf: a=%" PRId64 ", b=%" PRId64 ", c=%" PRId64 "\n", rbuf_ptr->a, rbuf_ptr->b, + rbuf_ptr->c); goto error; } } /* end for */ From fce7ce18697ecead37e75d3d05483ef995cf24dd Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Wed, 18 Oct 2023 11:11:02 -0500 Subject: [PATCH 017/101] Clean up comments. 
(#3695) --- src/H5Dio.c | 76 ++++++++++++++++++++++++----------------------------- 1 file changed, 35 insertions(+), 41 deletions(-) diff --git a/src/H5Dio.c b/src/H5Dio.c index 2134ce1c79a..611518d3fc0 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -143,17 +143,17 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) } /* end if */ #endif /*H5_HAVE_PARALLEL*/ - /* iterate over all dsets and construct I/O information necessary to do I/O */ + /* Iterate over all dsets and construct I/O information necessary to do I/O */ for (i = 0; i < count; i++) { haddr_t prev_tag = HADDR_UNDEF; - /* check args */ + /* Check args */ if (NULL == dset_info[i].dset) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset"); if (NULL == dset_info[i].dset->oloc.file) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file"); - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Set up datatype info for operation */ @@ -173,10 +173,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].nelmts > 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer"); - /* If the buffer is nil, and 0 element is selected, make a fake buffer. - * This is for some MPI package like ChaMPIon on NCSA's tungsten which - * doesn't support this feature. - */ + /* If the buffer is nil, and 0 element is selected, make a fake buffer. */ dset_info[i].buf.vp = &fake_char; } /* end if */ @@ -191,8 +188,8 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) * rapidly changing coordinates match up), but the I/O code still has * difficulties with the notion. * - * To solve this, we check to see if H5S_select_shape_same() returns true, - * and if the ranks of the mem and file spaces are different. If they are, + * To solve this, check if H5S_select_shape_same() returns true + * and the ranks of the mem and file spaces are different. If so, * construct a new mem space that is equivalent to the old mem space, and * use that instead. 
* @@ -347,7 +344,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].layout_ops.mdio_init) { haddr_t prev_tag = HADDR_UNDEF; - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Make second phase IO init call */ @@ -396,7 +393,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].skip_io) continue; - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset object header addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Invoke correct "high level" I/O routine */ @@ -553,18 +550,18 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (NULL == (store = (H5D_storage_t *)H5MM_malloc(count * sizeof(H5D_storage_t)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset storage info array buffer"); - /* iterate over all dsets and construct I/O information */ + /* Iterate over all dsets and construct I/O information */ for (i = 0; i < count; i++) { bool should_alloc_space = false; /* Whether or not to initialize dataset's storage */ haddr_t prev_tag = HADDR_UNDEF; - /* check args */ + /* Check args */ if (NULL == dset_info[i].dset) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset"); if (NULL == dset_info[i].dset->oloc.file) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file"); - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* All filters in the DCPL must have encoding enabled. */ @@ -620,10 +617,7 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].nelmts > 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no input buffer"); - /* If the buffer is nil, and 0 element is selected, make a fake buffer. - * This is for some MPI package like ChaMPIon on NCSA's tungsten which - * doesn't support this feature. - */ + /* If the buffer is nil, and 0 element is selected, make a fake buffer. */ dset_info[i].buf.cvp = &fake_char; } /* end if */ @@ -633,18 +627,18 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (!(H5S_has_extent(dset_info[i].mem_space))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set"); - /* H5S_select_shape_same() has been modified to accept topologically - * identical selections with different rank as having the same shape - * (if the most rapidly changing coordinates match up), but the I/O - * code still has difficulties with the notion. + /* H5S_select_shape_same() has been modified to accept topologically identical + * selections with different rank as having the same shape (if the most + * rapidly changing coordinates match up), but the I/O code still has + * difficulties with the notion. * - * To solve this, we check to see if H5S_select_shape_same() returns - * true, and if the ranks of the mem and file spaces are different. - * If they are, construct a new mem space that is equivalent to the - * old mem space, and use that instead. + * To solve this, check if H5S_select_shape_same() returns true + * and the ranks of the mem and file spaces are different. If so, + * construct a new mem space that is equivalent to the old mem space, and + * use that instead. * - * Note that in general, this requires us to touch up the memory buffer - * as well. + * Note that in general, this requires us to touch up the memory buffer as + * well. 
*/ if (dset_info[i].nelmts > 0 && true == H5S_SELECT_SHAPE_SAME(dset_info[i].mem_space, dset_info[i].file_space) && @@ -818,11 +812,11 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) "unable to allocate array of selected pieces"); } - /* loop with serial & single-dset write IO path */ + /* Loop with serial & single-dset write IO path */ for (i = 0; i < count; i++) { assert(!dset_info[i].skip_io); - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info->dset->oloc.addr, &prev_tag); /* Invoke correct "high level" I/O routine */ @@ -936,7 +930,7 @@ H5D__ioinfo_init(size_t count, H5D_io_op_type_t op_type, H5D_dset_io_info_t *dse FUNC_ENTER_PACKAGE_NOERR - /* check args */ + /* Check args */ assert(count > 0); assert(dset_info); assert(dset_info[0].dset->oloc.file); @@ -1057,7 +1051,7 @@ H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t FUNC_ENTER_PACKAGE - /* check args */ + /* Check args */ assert(io_info); assert(dset_info); @@ -1151,7 +1145,7 @@ H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t /*------------------------------------------------------------------------- * Function: H5D__typeinfo_init_phase2 * - * Purpose: Continue initializing type info for all datasets after + * Purpose: Continues initializing type info for all datasets after * calculating the max type size across all datasets, and * before final determination of collective/independent in * H5D__ioinfo_adjust(). Currently just checks to see if @@ -1169,7 +1163,7 @@ H5D__typeinfo_init_phase2(H5D_io_info_t *io_info) FUNC_ENTER_PACKAGE - /* check args */ + /* Check args */ assert(io_info); /* If selection I/O mode is default (auto), enable it here if the VFD supports it (it will be turned off @@ -1238,7 +1232,7 @@ H5D__typeinfo_init_phase2(H5D_io_info_t *io_info) /*------------------------------------------------------------------------- * Function: H5D__ioinfo_adjust * - * Purpose: Adjust operation's I/O info for any parallel I/O, also + * Purpose: Adjusts operation's I/O info for any parallel I/O, also * handle decision on selection I/O even in serial case * * Return: Non-negative on success/Negative on failure @@ -1253,10 +1247,10 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) FUNC_ENTER_PACKAGE - /* check args */ + /* Check args */ assert(io_info); - /* check the first dset, should exist either single or multi dset cases */ + /* Check the first dset, should exist either single or multi dset cases */ assert(io_info->dsets_info[0].dset); dset0 = io_info->dsets_info[0].dset; assert(dset0->oloc.file); @@ -1317,7 +1311,7 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) if (io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused > 0) break; - /* If the above loop didn't complete at least one dataset has a filter */ + /* If the above loop didn't complete, at least one dataset has a filter */ if (i < io_info->count) { int comm_size = 0; @@ -1363,9 +1357,9 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) /*------------------------------------------------------------------------- * Function: H5D__typeinfo_init_phase3 * - * Purpose: Finish initializing type info for all datasets after - * calculating the max type size across all datasets. And - * after final collective/independent determination in + * Purpose: Finishes initializing type info for all datasets after + * calculating the max type size across all datasets and + * final collective/independent determination in * H5D__ioinfo_adjust(). 
* * Return: Non-negative on success/Negative on failure From 65e1bd8ce90d286bd4ffc1575414d775ae86a319 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:44:42 -0500 Subject: [PATCH 018/101] Add NVidia compiler support and CI (#3686) --- .github/workflows/autotools.yml | 6 + .github/workflows/cmake.yml | 6 + .github/workflows/intel-auto.yml | 12 +- .github/workflows/intel-cmake.yml | 8 +- .github/workflows/linux-nvhpc-auto.yml | 58 --------- .github/workflows/linux-nvhpc.yml | 56 --------- .github/workflows/nvhpc-auto.yml | 87 +++++++++++++ .github/workflows/nvhpc-cmake.yml | 76 +++++++++++ config/cmake/HDFCXXCompilerFlags.cmake | 18 ++- config/cmake/HDFCompilerFlags.cmake | 13 +- config/cmake/HDFFortranCompilerFlags.cmake | 12 +- config/linux-gnulibc1 | 4 + config/nvidia-cxxflags | 101 +++++++++++++++ config/nvidia-fflags | 139 +++++++++++++++++++++ config/nvidia-flags | 122 ++++++++++++++++++ fortran/src/CMakeLists.txt | 2 +- 16 files changed, 593 insertions(+), 127 deletions(-) delete mode 100644 .github/workflows/linux-nvhpc-auto.yml delete mode 100644 .github/workflows/linux-nvhpc.yml create mode 100644 .github/workflows/nvhpc-auto.yml create mode 100644 .github/workflows/nvhpc-cmake.yml create mode 100644 config/nvidia-cxxflags create mode 100644 config/nvidia-fflags create mode 100644 config/nvidia-flags diff --git a/.github/workflows/autotools.yml b/.github/workflows/autotools.yml index 89afa405c43..d0cf5577738 100644 --- a/.github/workflows/autotools.yml +++ b/.github/workflows/autotools.yml @@ -52,3 +52,9 @@ jobs: uses: ./.github/workflows/intel-auto.yml with: build_mode: "production" + + call-release-auto-nvhpc: + name: "Autotools nvhpc Workflows" + uses: ./.github/workflows/nvhpc-auto.yml + with: + build_mode: "production" diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml index 75180c0c048..84fe01094d9 100644 --- a/.github/workflows/cmake.yml +++ b/.github/workflows/cmake.yml @@ -44,3 +44,9 @@ jobs: uses: ./.github/workflows/intel-cmake.yml with: build_mode: "Release" + + call-release-cmake-nvhpc: + name: "CMake nvhpc Workflows" + uses: ./.github/workflows/nvhpc-cmake.yml + with: + build_mode: "Release" diff --git a/.github/workflows/intel-auto.yml b/.github/workflows/intel-auto.yml index f5249bdaf4a..d63262f28a1 100644 --- a/.github/workflows/intel-auto.yml +++ b/.github/workflows/intel-auto.yml @@ -13,14 +13,16 @@ permissions: jobs: Intel_build_and_test: - name: "Intel ${{ inputs.build_mode }} -Werror (build only)" + name: "Intel ${{ inputs.build_mode }}" runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - name: Install Dependencies - run: | + run: | sudo apt-get update sudo apt-get install autoconf automake libtool libtool-bin libaec-dev + - name: Add oneAPI to apt shell: bash run: | @@ -39,9 +41,9 @@ jobs: sudo apt install -y intel-oneapi-mpi-devel sudo apt-get install doxygen graphviz sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev - echo "CC=icx" >> $GITHUB_ENV - echo "CXX=icpx" >> $GITHUB_ENV - echo "FC=ifx" >> $GITHUB_ENV + echo "CC=icx" >> $GITHUB_ENV + echo "CXX=icpx" >> $GITHUB_ENV + echo "FC=ifx" >> $GITHUB_ENV - name: Install oneAPI MKL library shell: bash diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index 4bdda03c79d..9972376332e 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -14,7 +14,7 @@ permissions: jobs: Intel_build_and_test: - name: "Intel ${{ inputs.build_mode }} 
-Werror (build only)" + name: "Intel ${{ inputs.build_mode }}" runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -30,9 +30,9 @@ jobs: - name: Install Linux Dependencies run: | - sudo apt update - sudo apt-get install ninja-build doxygen graphviz - sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev - name: install oneAPI dpcpp and fortran compiler shell: bash diff --git a/.github/workflows/linux-nvhpc-auto.yml b/.github/workflows/linux-nvhpc-auto.yml deleted file mode 100644 index 1281e979633..00000000000 --- a/.github/workflows/linux-nvhpc-auto.yml +++ /dev/null @@ -1,58 +0,0 @@ -name: linux autotools nvhpc - -on: - workflow_dispatch: - push: - pull_request: - branches: [ develop ] - paths-ignore: - - '.github/CODEOWNERS' - - '.github/FUNDING.yml' - - 'doc/**' - - 'release_docs/**' - - 'ACKNOWLEDGEMENTS' - - 'COPYING**' - - '**.md' - -# Using concurrency to cancel any in-progress job or run -concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Install System dependencies - run: | - sudo apt update - sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev doxygen openssl libtool libtool-bin build-essential - - name: Install NVHPC - run: | - curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg - echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list - sudo apt-get update -y - sudo apt-get install -y nvhpc-23-7 - - name: Get Sources - uses: actions/checkout@v4 - - name: Test HDF5 - env: - NPROCS: 2 - run: | - export NVHPCSDK=/opt/nvidia/hpc_sdk - export OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++ - export OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc - export OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran - export LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH - export DESTDIR=/tmp - ./autogen.sh - ./configure CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpicc FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpifort FCFLAGS="-fPIC -fortranlibs" --enable-fortran --enable-shared --enable-parallel - cat config.log - make -j - make check -j - make install - make uninstall diff --git a/.github/workflows/linux-nvhpc.yml b/.github/workflows/linux-nvhpc.yml deleted file mode 100644 index 06fd40a7c4f..00000000000 --- a/.github/workflows/linux-nvhpc.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: linux CMake nvhpc - -on: - workflow_dispatch: - push: - pull_request: - branches: [ develop ] - paths-ignore: - - '.github/CODEOWNERS' - - '.github/FUNDING.yml' - - 'doc/**' - - 'release_docs/**' - - 'ACKNOWLEDGEMENTS' - - 'COPYING**' - - '**.md' - -# Using concurrency to cancel any in-progress job or run -concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} - cancel-in-progress: 
true - -permissions: - contents: read - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Install System dependencies - run: | - sudo apt update - sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev doxygen openssl libtool libtool-bin build-essential - - name: Install NVHPC - run: | - curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg - echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list - sudo apt-get update -y - sudo apt-get install -y nvhpc-23-7 - - name: Get Sources - uses: actions/checkout@v4 - - name: Test HDF5 - env: - FC: nvfortran - CC: nvc - FCFLAGS: -fPIC - run: | - export NVHPCSDK=/opt/nvidia/hpc_sdk - export OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++ - export OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc - export OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran - export LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/cuda/12.2/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH - cmake -B build -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF -DHDF5_ENABLE_PARALLEL:BOOL=ON -DHDF5_BUILD_FORTRAN:BOOL=ON - cat build/CMakeCache.txt - cmake --build build - ctest --test-dir build --output-on-failure diff --git a/.github/workflows/nvhpc-auto.yml b/.github/workflows/nvhpc-auto.yml new file mode 100644 index 00000000000..3e3a323fe1e --- /dev/null +++ b/.github/workflows/nvhpc-auto.yml @@ -0,0 +1,87 @@ +name: hdf5 dev autotools nvhpc + +on: + workflow_call: + inputs: + build_mode: + description: "release vs. 
debug build" + required: true + type: string + +permissions: + contents: read + +jobs: + nvhpc_build_and_test: + name: "nvhpc ${{ inputs.build_mode }}" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Dependencies + run: | + sudo apt-get update + sudo apt-get install autoconf automake libtool libtool-bin libaec-dev + sudo apt-get install doxygen graphviz + sudo apt install -y zlib1g-dev libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev openssl build-essential + + - name: Install NVHPC + shell: bash + run: | + curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg + echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list + sudo apt-get update -y + sudo apt-get install -y nvhpc-23-7 + echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV + echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV + echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV + echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++" >> $GITHUB_ENV + echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc" >> $GITHUB_ENV + echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib" >> $GITHUB_ENV + echo "DESTDIR=/tmp" >> $GITHUB_ENV + + - name: Autotools Configure + shell: bash + run: | + export RUNPARALLEL="mpiexec -np 2" + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + sh ./autogen.sh + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + $GITHUB_WORKSPACE/configure \ + FCFLAGS="-fPIC -fortranlibs" \ + --enable-build-mode=${{ inputs.build_mode }} \ + --enable-fortran \ + --enable-shared \ + --enable-parallel + #cat config.log + + # BUILD + - name: Autotools Build + shell: bash + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + make -j3 + working-directory: ${{ runner.workspace }}/build + + # RUN TESTS + # NORMAL + - name: Autotools Run Tests + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + make check -j + working-directory: ${{ runner.workspace }}/build + + # INSTALL (note that this runs even when we don't run the tests) + - name: Autotools Install + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + make install + working-directory: ${{ runner.workspace }}/build + +# - name: Autotools Verify Install +# run: | +# export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH +# make check-install +# working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/nvhpc-cmake.yml b/.github/workflows/nvhpc-cmake.yml new file mode 100644 index 00000000000..489c0bbf3fb --- /dev/null +++ b/.github/workflows/nvhpc-cmake.yml @@ -0,0 +1,76 @@ +name: hdf5 dev CMake nvhpc + +on: + workflow_call: + inputs: + build_mode: + description: "release vs. 
debug build" + required: true + type: string + +permissions: + contents: read + +jobs: + nvhpc_build_and_test: + name: "nvhpc ${{ inputs.build_mode }}" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Linux dependencies + shell: bash + run: | + sudo apt update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt install -y libaec-dev zlib1g-dev wget curl bzip2 flex bison cmake libzip-dev openssl build-essential + + - name: Install NVHPC + shell: bash + run: | + curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg + echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list + sudo apt-get update -y + sudo apt-get install -y nvhpc-23-7 + echo "CC=nvc" >> $GITHUB_ENV + echo "FC=nvfortran" >> $GITHUB_ENV + echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV + echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++" >> $GITHUB_ENV + echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc" >> $GITHUB_ENV + echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/cuda/12.2/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib" >> $GITHUB_ENV + echo "DESTDIR=/tmp" >> $GITHUB_ENV + + - name: CMake Configure with nvc + shell: bash + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \ + -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ + -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF \ + -DHDF5_ENABLE_PARALLEL:BOOL=ON \ + -DHDF5_BUILD_CPP_LIB:BOOL=OFF \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ + -DHDF5_BUILD_FORTRAN:BOOL=ON \ + -DHDF5_ENABLE_ASSERTS:BOOL=ON \ + -DMPIEXEC_MAX_NUMPROCS:STRING="2" \ + $GITHUB_WORKSPACE + cat src/libhdf5.settings + + # BUILD + - name: CMake Build + shell: bash + run: | + cmake --build . --parallel 3 --config ${{ inputs.build_mode }} + working-directory: ${{ runner.workspace }}/build + + # RUN TESTS +# - name: CMake Run Tests +# shell: bash +# run: | +# ctest . 
--parallel 2 -C ${{ inputs.build_mode }} -V +# working-directory: ${{ runner.workspace }}/build diff --git a/config/cmake/HDFCXXCompilerFlags.cmake b/config/cmake/HDFCXXCompilerFlags.cmake index e8a55ba779b..13f712dd344 100644 --- a/config/cmake/HDFCXXCompilerFlags.cmake +++ b/config/cmake/HDFCXXCompilerFlags.cmake @@ -49,6 +49,22 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL SunPro AND CMAKE_CXX_COMPILER_LOADED) endif () endif () +if (CMAKE_CXX_COMPILER_ID STREQUAL "NVHPC" AND CMAKE_CXX_COMPILER_LOADED) + if (NOT DEFINED CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION) + if (NOT CMAKE_CXX_STANDARD OR CMAKE_CXX_STANDARD EQUAL 11) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_C11_STANDARD_COMPILE_OPTION}") + endif () + endif () + if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Minform=warn") + if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s") + endif () + else () + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Mbounds -gopt -g") + endif () +endif () + if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED) set (CMAKE_CXX_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_CXX_FLAGS}") if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") @@ -97,7 +113,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- if (${CMAKE_SYSTEM_NAME} MATCHES "SunOS") diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index 1dca9103ef3..a6bce982849 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -47,6 +47,17 @@ if(_CLANG_MSVC_WINDOWS AND "x${CMAKE_C_COMPILER_FRONTEND_VARIANT}" STREQUAL "xGN set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Xlinker -stack:20000000") endif() +if(CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Minform=warn") + if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") + if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s") + endif () + else () + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g") + endif () +endif() + if (CMAKE_COMPILER_IS_GNUCC) set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") @@ -106,7 +117,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- diff --git a/config/cmake/HDFFortranCompilerFlags.cmake b/config/cmake/HDFFortranCompilerFlags.cmake index e08df05c52d..f207c7062c5 100644 --- a/config/cmake/HDFFortranCompilerFlags.cmake +++ b/config/cmake/HDFFortranCompilerFlags.cmake @@ -41,7 +41,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets 
#----------------------------------------------------------------------------- if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 10.0) if (HDF5_ENABLE_BUILD_DIAGS) @@ -56,6 +56,16 @@ if (CMAKE_Fortran_COMPILER_ID STREQUAL "NAG") message (STATUS "... Select IEEE floating-point mode full") list (APPEND HDF5_CMAKE_Fortran_FLAGS "-ieee=full") endif () +if (CMAKE_Fortran_COMPILER_ID STREQUAL "NVHPC") + if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Mnoframe") + if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -s") + endif () + else () + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Mbounds -Mchkptr -Mdclchk -g") + endif () +endif () if (NOT MSVC AND NOT MINGW) # General flags diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 index 7f3c3398048..7614b07852f 100644 --- a/config/linux-gnulibc1 +++ b/config/linux-gnulibc1 @@ -298,6 +298,7 @@ case $FC in *pgf90*) fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'pgf90'` ;; + *nagfor*|*nagftn*) RM='rm -f' tmpfile=/tmp/cmpver.$$ @@ -322,6 +323,9 @@ fi # check if the compiler_version_info is already set if test -z "$cxx_version_info"; then case $CXX in + *nvc++*) + cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'nvc++'` + ;; *pgc++*) cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'pgc++'` ;; diff --git a/config/nvidia-cxxflags b/config/nvidia-cxxflags new file mode 100644 index 00000000000..6becd26887a --- /dev/null +++ b/config/nvidia-cxxflags @@ -0,0 +1,101 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# NVIDIA nvc++ compiler or a derivative. It is careful not to do anything +# if the compiler is not NVIDIA; otherwise `cxx_flags_set' is set to `yes' +# + +# Get the compiler version in a way that works for NVIDIA nvc++ +# unless a compiler version is already known +# +# cxx_vendor: The compiler name: nvc++ +# cxx_version: Version number: 5.0-2, 5.2-2 +# +if test X = "X$cxx_flags_set"; then + cxx_version="`$CXX $CXXFLAGS -V 2>&1 |grep '^nvc++ '`" + if test X != "X$cxx_version"; then + cxx_vendor=`echo $cxx_version |sed 's/\([a-z]*++\).*/\1/'` + cxx_version=`echo $cxx_version |sed 's/nvc++ \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CXX' is NVIDIA $cxx_vendor-$cxx_version" + + # Some version numbers + # NVIDIA version numbers are of the form: "major.minor-patch" + cxx_vers_major=`echo $cxx_version | cut -f1 -d.` + cxx_vers_minor=`echo $cxx_version | cut -f2 -d. | cut -f1 -d-` + cxx_vers_patch=`echo $cxx_version | cut -f2 -d. 
| cut -f2 -d-` + test -n "$cxx_vers_major" || cxx_vers_major=0 + test -n "$cxx_vers_minor" || cxx_vers_minor=0 + test -n "$cxx_vers_patch" || cxx_vers_patch=0 + cxx_vers_all=`expr $cxx_vers_major '*' 1000000 + $cxx_vers_minor '*' 1000 + $cxx_vers_patch` + fi +fi + +# Common PGI flags for various situations +if test "X-nvc++" = "X-$cxx_vendor"; then + + ########### + # General # + ########### + + # Default to C++11 standard + H5_CXXFLAGS="$H5_CXXFLAGS -std=c++11 -Minform=warn" + + ############## + # Production # + ############## + + PROD_CXXFLAGS= + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly in configure + # -g is handled by the symbols flags + DEBUG_CXXFLAGS="-Mbounds" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CXXFLAGS="-s" + SYMBOLS_CXXFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CXXFLAGS="-Mprof=func,line" + # Use this for profiling with gprof + #PROFILE_CXXFLAGS="-pg" + + ################ + # Optimization # + ################ + + HIGH_OPT_CXXFLAGS="-O4" + DEBUG_OPT_CXXFLAGS="-gopt -O2" + NO_OPT_CXXFLAGS="-O0" + + ################# + # Flags are set # + ################# + cxx_flags_set=yes +fi + +# Clear cxx info if no flags set +if test "X-$cxx_flags_set" = "X-"; then + cxx_vendor= + cxx_version= +fi diff --git a/config/nvidia-fflags b/config/nvidia-fflags new file mode 100644 index 00000000000..77677e1036a --- /dev/null +++ b/config/nvidia-fflags @@ -0,0 +1,139 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# NVIDIA nvfortran compiler or a derivative. It is careful not to do anything +# if the compiler is not NVIDIA; otherwise `f9x_flags_set' is set to `yes' +# + +# Get the compiler version in a way that works for NVIDIA nvfortran +# unless a compiler version is already known +# +# f9x_vendor: The compiler name: nvfortran +# f9x_version: Version number: +# +if test X = "X$f9x_flags_set"; then + f9x_version="`$FC $FCFLAGS -V 2>&1 |grep '^nvfortran '`" + if test X != "X$f9x_version"; then + is_mpi="`$FC $FCFLAGS -help 2>&1 |grep 'link MPI'`" + f9x_vendor=`echo $f9x_version |sed 's/\([a-z0-9]*\).*/\1/'` + f9x_version=`echo $f9x_version |sed 's/nvfortran \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$FC' is NVIDIA $f9x_vendor-$f9x_version" + + # Some version numbers + # NVIDIA version numbers are of the form: "major.minor-patch" + f9x_vers_major=`echo $f9x_version | cut -f1 -d.` + f9x_vers_minor=`echo $f9x_version | cut -f2 -d. | cut -f1 -d-` + f9x_vers_patch=`echo $f9x_version | cut -f2 -d. | cut -f2 -d-` + test -n "$f9x_vers_major" || f9x_vers_major=0 + test -n "$f9x_vers_minor" || f9x_vers_minor=0 + test -n "$f9x_vers_patch" || f9x_vers_patch=0 + f9x_vers_all=`expr $f9x_vers_major '*' 1000000 + $f9x_vers_minor '*' 1000 + $f9x_vers_patch` + fi +fi + +# Common NVIDIA flags for various situations +if test "X-nvfortran" = "X-$f9x_vendor"; then + + ############################### + # Architecture-specific flags # + ############################### + + arch= + # Nothing currently. 
(Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ############## + # Production # + ############## + + # Check for MPI wrapper being used and tweak down compiler options + # Comment out the Tweaking since it caused problems to mpich1.2.6. + # Need to investigate the reasons to tweak. + #if test "X-" == "X-$is_mpi"; then + # PROD_FCFLAGS="-fast -s -Mnoframe" + #else + # PROD_FCFLAGS="-O2 -s" + #fi + PROD_FCFLAGS="-fast -Mnoframe -fPIC" + + ######### + # Debug # + ######### + + DEBUG_FCFLAGS="-Mbounds -Mchkptr -Mdclchk -fPIC" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_FCFLAGS="-s" + SYMBOLS_FCFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_FCFLAGS="-Mprof=func,line" + # Use this for profiling with gprof + #PROFILE_FCFLAGS="-pg" + + ################ + # Optimization # + ################ + + HIGH_OPT_FCFLAGS= + DEBUG_OPT_FCFLAGS= + NO_OPT_FCFLAGS= + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + FC_BASENAME=nvfortran + Fortran_COMPILER_ID=NVIDIA + F9XSUFFIXFLAG="" + FSEARCH_DIRS="" + # Uncomment the following to add something specific for FCFLAGS. + #FCFLAGS="$FCFLAGS" + + + + ################# + # Flags are set # + ################# + f9x_flags_set=yes +fi + +# Clear f9x info if no flags set +if test "X-$f9x_flags_set" = "X-"; then + f9x_vendor= + f9x_version= +fi + diff --git a/config/nvidia-flags b/config/nvidia-flags new file mode 100644 index 00000000000..864c6444114 --- /dev/null +++ b/config/nvidia-flags @@ -0,0 +1,122 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# NVIDIA nvc compiler or a derivative. It is careful not to do anything +# if the compiler is not nvcc; otherwise `cc_flags_set' is set to `yes' +# + +# Get the compiler version in a way that works for nvc +# unless a compiler version is already known +# +# cc_vendor: The compiler name: nvc +# cc_version: Version number: 5.0-2, 5.2-2 +# +if test X = "X$cc_flags_set"; then + cc_version="`$CC $CFLAGS -V 2>&1 |grep '^nvc '`" + if test X != "X$cc_version"; then + is_mpi="`$CC $CFLAGS -help 2>&1 |grep 'MPI'`" + cc_vendor=`echo $cc_version |sed 's/\([a-z]*\).*/\1/'` + cc_version=`echo $cc_version |sed 's/nvc \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CC' is NVIDIA $cc_vendor-$cc_version" + + # Some version numbers + # NVIDIA version numbers are of the form: "major.minor-patch" + cc_vers_major=`echo $cc_version | cut -f1 -d.` + cc_vers_minor=`echo $cc_version | cut -f2 -d. | cut -f1 -d-` + cc_vers_patch=`echo $cc_version | cut -f2 -d. 
| cut -f2 -d-` + test -n "$cc_vers_major" || cc_vers_major=0 + test -n "$cc_vers_minor" || cc_vers_minor=0 + test -n "$cc_vers_patch" || cc_vers_patch=0 + cc_vers_all=`expr $cc_vers_major '*' 1000000 + $cc_vers_minor '*' 1000 + $cc_vers_patch` + fi +fi + +# Common PGI flags for various situations +if test "X-nvc" = "X-$cc_vendor" -o "X-nvcc" = "X-$cc_vendor"; then + # Insert section about version specific problems from compiler flags here, + # if necessary. + + arch= + # Architecture-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ########### + # General # + ########### + + # Default to C99 standard. + H5_CFLAGS="$H5_CFLAGS $arch -c99 -Minform=warn" + + ############## + # Production # + ############## + + # NDEBUG is handled explicitly by the configure script + PROD_CFLAGS="-fast" + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly by the configure script + # -g is handled by the symbols flags + DEBUG_CFLAGS="-Mbounds" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CFLAGS="-s" + SYMBOLS_CFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CFLAGS="-Mprof=func,line" + # Use this for profiling with gprof + #PROFILE_CFLAGS="-pg" + + ################ + # Optimization # + ################ + + HIGH_OPT_CFLAGS="-O1" # -O2+ currently has test failures. + DEBUG_OPT_CFLAGS="-gopt -O2" + NO_OPT_CFLAGS="-O0" + + ################# + # Flags are set # + ################# + cc_flags_set=yes +fi + +# Clear cc info if no flags set +if test "X-$cc_flags_set" = "X-"; then + cc_vendor= + cc_version= +fi diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index 199a0c8f148..57e17e5a675 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -332,7 +332,7 @@ if (BUILD_STATIC_LIBS) target_compile_options(${HDF5_F90_LIB_TARGET} PRIVATE "${HDF5_CMAKE_Fortran_FLAGS}") target_compile_definitions(${HDF5_F90_LIB_TARGET} PRIVATE - "$<$:HDF5F90_WINDOWS"> + "$<$:HDF5F90_WINDOWS>" "$<$:${WIN_COMPILE_FLAGS}>" ) target_link_libraries (${HDF5_F90_LIB_TARGET} From 8ff48054f83d2b262c799f49b6beb6459d8a5c48 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:46:02 -0500 Subject: [PATCH 019/101] Cache variables req FORCE to change (#3706) * Cache variables req FORCE to change * Also plugin needs FORCE --- CMakeFilters.cmake | 4 ++-- CMakePlugins.cmake | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake index da8e2c74ff4..72f7f459427 100644 --- a/CMakeFilters.cmake +++ b/CMakeFilters.cmake @@ -36,8 +36,8 @@ include (ExternalProject) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (ZLIB_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for ZLIB") - set (SZIP_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for SZIP") + set (ZLIB_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for ZLIB" FORCE) + set (SZIP_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for SZIP" FORCE) if 
(HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (ZLIB_URL ${ZLIB_GIT_URL} CACHE STRING "Path to zlib git repository") set (ZLIB_BRANCH ${ZLIB_GIT_BRANCH}) diff --git a/CMakePlugins.cmake b/CMakePlugins.cmake index bc1074fbba3..7fd332a2ebe 100644 --- a/CMakePlugins.cmake +++ b/CMakePlugins.cmake @@ -26,7 +26,7 @@ include (ExternalProject) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (PLUGIN_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for PLUGIN") + set (PLUGIN_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for PLUGIN" FORCE) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (PLUGIN_URL ${PLUGIN_GIT_URL} CACHE STRING "Path to PLUGIN git repository") set (PLUGIN_BRANCH ${PLUGIN_GIT_BRANCH}) From ebd3744407cf835fcca0170814517da09f62d814 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 18 Oct 2023 16:47:13 -0500 Subject: [PATCH 020/101] Work around Theta system issue failure in links test (#3710) When the Subfiling VFD is enabled, the links test may try to initialize the Subfiling VFD and call MPI_Init_thread. On Theta, this appears to have an issue that will cause the links test to fail. Reworked the test to check for the same conditions in a more roundabout way that doesn't involved initializing the Subfiling VFD --- test/links.c | 56 ++++++++++++++++++++-------------------------------- 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/test/links.c b/test/links.c index 6f07d32253e..99e011402c0 100644 --- a/test/links.c +++ b/test/links.c @@ -9880,6 +9880,7 @@ external_set_elink_cb(hid_t fapl, bool new_format) set_elink_cb_t op_data, *op_data_p; H5L_elink_traverse_t cb; char filename1[NAME_BUF_SIZE], filename2[NAME_BUF_SIZE]; + bool driver_is_parallel; unsigned flags; if (new_format) @@ -9890,16 +9891,21 @@ external_set_elink_cb(hid_t fapl, bool new_format) /* Build user data for callback */ op_data.parent_file = filename1; op_data.target_file = filename2; + + /* Check if using a parallel file driver */ + if (h5_using_parallel_driver(fapl, &driver_is_parallel) < 0) + TEST_ERROR; + + base_driver = H5Pget_driver(fapl); + /* Core file driver has issues when used as the member file driver for a family file */ /* Family file driver cannot be used with family or multi drivers for member files */ /* Also disable parallel member drivers, because H5F_HAS_FEATURE(H5FD_FEAT_HAS_MPI) would report false, causing problems */ - base_driver = H5Pget_driver(fapl); - op_data.base_fapl = - (base_driver == H5FD_FAMILY || base_driver == H5FD_MULTI || base_driver == H5FD_MPIO || - base_driver == H5FD_CORE || base_driver == H5FD_DIRECT || base_driver == H5FD_SUBFILING) - ? H5P_DEFAULT - : fapl; + op_data.base_fapl = fapl; + if (base_driver == H5FD_CORE || base_driver == H5FD_FAMILY || base_driver == H5FD_MULTI || + base_driver == H5FD_DIRECT || driver_is_parallel) + op_data.base_fapl = H5P_DEFAULT; op_data.fam_size = ELINK_CB_FAM_SIZE; op_data.code = 0; @@ -18434,14 +18440,12 @@ link_info_by_idx_old(hid_t fapl) { hid_t file_id = H5I_INVALID_HID; /* File ID */ hid_t group_id = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID; /* Group IDs */ - H5F_t *f = NULL; - unsigned hard_link; /* Create hard or soft link? 
*/ - H5L_info2_t linfo; /* Link info struct */ - char objname[NAME_BUF_SIZE]; /* Object name */ - char valname[NAME_BUF_SIZE]; /* Link value name */ - char filename[NAME_BUF_SIZE]; /* File name */ + unsigned hard_link; /* Create hard or soft link? */ + H5L_info2_t linfo; /* Link info struct */ + char objname[NAME_BUF_SIZE]; /* Object name */ + char valname[NAME_BUF_SIZE]; /* Link value name */ + char filename[NAME_BUF_SIZE]; /* File name */ H5O_token_t objtoken[CORDER_NLINKS]; /* Tokens (Addresses) of the objects created */ - void *vol_obj_file = NULL; /* Object of file_id */ char tmpname[NAME_BUF_SIZE]; /* Temporary link name */ char tmpval[NAME_BUF_SIZE]; /* Temporary link value */ unsigned u; /* Local index variable */ @@ -18461,14 +18465,6 @@ link_info_by_idx_old(hid_t fapl) if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR; - /* Need the file struct to address encoding */ - /* Retrieve VOL object */ - if (NULL == (vol_obj_file = H5VL_vol_object(file_id))) - TEST_ERROR; - /* Retrieve file from VOL object */ - if (NULL == (f = (H5F_t *)H5VL_object_data((const H5VL_object_t *)vol_obj_file))) - TEST_ERROR; - /* Create group to operate on */ if ((group_id = H5Gcreate2(file_id, CORDER_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; @@ -19066,12 +19062,10 @@ delete_by_idx_old(hid_t fapl) { hid_t file_id = H5I_INVALID_HID; /* File ID */ hid_t group_id = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID; /* Group IDs */ - H5F_t *f = NULL; - H5L_info2_t linfo; /* Link info struct */ - H5_iter_order_t order; /* Order within in the index */ - void *vol_obj_file = NULL; /* Object of file_id */ - char objname[NAME_BUF_SIZE]; /* Object name */ - char filename[NAME_BUF_SIZE]; /* File name */ + H5L_info2_t linfo; /* Link info struct */ + H5_iter_order_t order; /* Order within in the index */ + char objname[NAME_BUF_SIZE]; /* Object name */ + char filename[NAME_BUF_SIZE]; /* File name */ H5O_token_t objtoken[CORDER_NLINKS]; /* Tokens (Addresses) of the objects created */ char tmpname[NAME_BUF_SIZE]; /* Temporary link name */ unsigned u; /* Local index variable */ @@ -19091,14 +19085,6 @@ delete_by_idx_old(hid_t fapl) if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR; - /* Need the file struct to address encoding */ - /* Retrieve VOL object */ - if (NULL == (vol_obj_file = H5VL_vol_object(file_id))) - TEST_ERROR; - /* Retrieve file from VOL object */ - if (NULL == (f = (H5F_t *)H5VL_object_data((const H5VL_object_t *)vol_obj_file))) - TEST_ERROR; - /* Create group to operate on */ if ((group_id = H5Gcreate2(file_id, CORDER_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; From 29c1c02300c00deb2584e672041b3775b11aaf87 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 19 Oct 2023 08:01:02 -0500 Subject: [PATCH 021/101] Some corrections and fix for plugins (#3712) --- CMakeLists.txt | 7 ++++ config/cmake/HDF5PluginCache.cmake | 2 +- config/cmake/HDF5PluginMacros.cmake | 50 -------------------------- config/cmake/scripts/CTestScript.cmake | 12 +++---- release_docs/RELEASE.txt | 11 +++--- 5 files changed, 19 insertions(+), 63 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6aa467d110b..5b76afecd1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -412,6 +412,13 @@ set (HDF5_PACKAGE_TARNAME "${HDF5_PACKAGE}${HDF_PACKAGE_EXT}") set (HDF5_PACKAGE_URL "http://www.hdfgroup.org") set (HDF5_PACKAGE_BUGREPORT "help@hdfgroup.org") 
+#----------------------------------------------------------------------------- +# Set variables needed for installation +#----------------------------------------------------------------------------- +set (HDF5_VERSION_STRING ${HDF5_PACKAGE_VERSION}) +set (HDF5_VERSION_MAJOR ${HDF5_PACKAGE_VERSION_MAJOR}) +set (HDF5_VERSION_MINOR ${HDF5_PACKAGE_VERSION_MINOR}) + #----------------------------------------------------------------------------- # Include some macros for reusable code #----------------------------------------------------------------------------- diff --git a/config/cmake/HDF5PluginCache.cmake b/config/cmake/HDF5PluginCache.cmake index 34a97d5902a..14075616173 100644 --- a/config/cmake/HDF5PluginCache.cmake +++ b/config/cmake/HDF5PluginCache.cmake @@ -6,7 +6,7 @@ # examples are the tests for plugins set (H5PL_BUILD_TESTING ON CACHE BOOL "Enable H5PL testing" FORCE) -set (BUILD_EXAMPLES ON CACHE BOOL "Build H5PL Examples" FORCE) +set (BUILD_EXAMPLES ${HDF5_BUILD_EXAMPLES} CACHE BOOL "Build H5PL Examples" FORCE) #preset HDF5 cache vars to this projects libraries instead of searching set (H5PL_HDF5_HEADER "H5pubconf.h" CACHE STRING "Name of HDF5 header" FORCE) diff --git a/config/cmake/HDF5PluginMacros.cmake b/config/cmake/HDF5PluginMacros.cmake index 41f746fb002..e2bdce3f33f 100644 --- a/config/cmake/HDF5PluginMacros.cmake +++ b/config/cmake/HDF5PluginMacros.cmake @@ -31,56 +31,6 @@ macro (EXTERNAL_PLUGIN_LIBRARY compress_type) include (${HDF_RESOURCES_DIR}/HDF5PluginCache.cmake) set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) add_subdirectory(${plugin_SOURCE_DIR} ${plugin_BINARY_DIR}) - if (ENABLE_BLOSC) - add_dependencies (h5blosc ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_blosc ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_blosc PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_BSHUF) - add_dependencies (h5bshuf ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_bshuf ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_bshuf PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_BZIP2) - add_dependencies (h5bz2 ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_bzip2 ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_bzip2 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_JPEG) - add_dependencies (h5jpeg ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_jpeg ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_jpeg PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_LZ4) - add_dependencies (h5lz4 ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_lz4 ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_lz4 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_LZF) - add_dependencies (h5lzf ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_lzf ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_lzf PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_MAFISC) - add_dependencies (h5mafisc ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_mafisc ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_mafisc PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_SZ) - add_dependencies (h5sz ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_sz ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_sz PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_ZFP) - add_dependencies (h5zfp ${HDF5_LIBSH_TARGET}) - 
add_dependencies (h5ex_d_zfp ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_zfp PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_ZSTD) - add_dependencies (h5zstd ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_zstd ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_zstd PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () endif () message (VERBOSE "HDF5_INCLUDE_DIR=${HDF5_INCLUDE_DIR}") set (PLUGIN_BINARY_DIR "${plugin_BINARY_DIR}") diff --git a/config/cmake/scripts/CTestScript.cmake b/config/cmake/scripts/CTestScript.cmake index f277864b7ee..2a57db8db7b 100644 --- a/config/cmake/scripts/CTestScript.cmake +++ b/config/cmake/scripts/CTestScript.cmake @@ -202,14 +202,14 @@ endforeach () # Initialize the CTEST commands #------------------------------ if (CMAKE_GENERATOR_TOOLSET) - set (CTEST_CONFIGURE_TOOLSET "-T${CMAKE_GENERATOR_TOOLSET}") + set (CTEST_CONFIGURE_TOOLSET "\"-T${CMAKE_GENERATOR_TOOLSET}\"") else () - set (CTEST_CONFIGURE_TOOLSET "") + set (CTEST_CONFIGURE_TOOLSET) endif() if (CMAKE_GENERATOR_ARCHITECTURE) - set (CTEST_CONFIGURE_ARCHITECTURE "-A${CMAKE_GENERATOR_ARCHITECTURE}") + set (CTEST_CONFIGURE_ARCHITECTURE "\"-A${CMAKE_GENERATOR_ARCHITECTURE}\"") else () - set (CTEST_CONFIGURE_ARCHITECTURE "") + set (CTEST_CONFIGURE_ARCHITECTURE) endif() if (LOCAL_MEMCHECK_TEST) if(LOCAL_USE_VALGRIND) @@ -217,7 +217,7 @@ if (LOCAL_MEMCHECK_TEST) find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind) endif() set (CTEST_CONFIGURE_COMMAND - "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/mccacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_ARCHITECTURE}\" \"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\"" + "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/mccacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" ${CTEST_CONFIGURE_ARCHITECTURE} ${CTEST_CONFIGURE_TOOLSET} \"${CTEST_SOURCE_DIRECTORY}\"" ) else () if (LOCAL_COVERAGE_TEST) @@ -226,7 +226,7 @@ else () endif () endif () set (CTEST_CONFIGURE_COMMAND - "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_ARCHITECTURE}\" \"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\"" + "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" ${CTEST_CONFIGURE_ARCHITECTURE} ${CTEST_CONFIGURE_TOOLSET} \"${CTEST_SOURCE_DIRECTORY}\"" ) endif () diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index b50fe611213..ea774af4562 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -73,6 +73,7 @@ New Features Removed HDF options for using FETCH_CONTENT explicitly: BUILD_SZIP_WITH_FETCHCONTENT:BOOL BUILD_ZLIB_WITH_FETCHCONTENT:BOOL + - Thread-safety + static library disabled on Windows w/ CMake The thread-safety feature requires hooks in DllMain(), which is only @@ -286,7 +287,6 @@ New Features Fortran Library: ---------------- - - Fixed an uninitialized error return value for hdferr to return the error state of the h5aopen_by_idx_f API. 
@@ -831,6 +831,7 @@ Bug Fixes since HDF5-1.14.0 release ----------- - + High-Level Library ------------------ - @@ -1007,14 +1008,12 @@ Platforms Tested x86_64; Version 19.10-0 - Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake) - Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake) - Visual Studio 2019 w/ clang 12.0.0 + Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 with MSVC-like command-line (C/C++ only - cmake) - Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2019 w/ Intel C/C++ only cmake) Visual Studio 2022 w/ clang 15.0.1 with MSVC-like command-line (C/C++ only - cmake) - Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2023 (cmake) Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) From c4a146efc40c66de7d06a387465c9f7ea9b2e280 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 19 Oct 2023 08:14:20 -0500 Subject: [PATCH 022/101] Fix issue with unmatched messages in ph5diff (#3719) --- release_docs/RELEASE.txt | 13 +++++++++++++ tools/lib/h5diff.c | 3 --- tools/src/h5diff/ph5diff_main.c | 8 ++++---- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index ea774af4562..83c20b07d3d 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -791,6 +791,19 @@ Bug Fixes since HDF5-1.14.0 release Tools ----- + - Fixed an issue with unmatched MPI messages in ph5diff + + The "manager" MPI rank in ph5diff was unintentionally sending "program end" + messages to its workers twice, leading to an error from MPICH similar to the + following: + + Abort(810645519) on node 1 (rank 1 in comm 0): Fatal error in internal_Finalize: Other MPI error, error stack: + internal_Finalize(50)...........: MPI_Finalize failed + MPII_Finalize(394)..............: + MPIR_Comm_delete_internal(1224).: Communicator (handle=44000000) being freed has 1 unmatched message(s) + MPIR_Comm_release_always(1250)..: + MPIR_finalize_builtin_comms(154): + - Fixed an issue in h5repack for variable-length typed datasets When repacking datasets into a new file, h5repack tries to determine whether diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c index 924f9f35de1..15f2a1428bf 100644 --- a/tools/lib/h5diff.c +++ b/tools/lib/h5diff.c @@ -1485,9 +1485,6 @@ diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1, hid_t file2_id, } /* end else */ } /* end while */ - for (i = 1; (int)i < g_nTasks; i++) - MPI_Send(NULL, 0, MPI_BYTE, (int)i, MPI_TAG_END, MPI_COMM_WORLD); - /* Print any final data waiting in our queue */ print_incoming_data(); } /* end if */ diff --git a/tools/src/h5diff/ph5diff_main.c b/tools/src/h5diff/ph5diff_main.c index 0f432610788..f90bd484ac8 100644 --- a/tools/src/h5diff/ph5diff_main.c +++ b/tools/src/h5diff/ph5diff_main.c @@ -127,7 +127,7 @@ ph5diff_worker(int nID) char filenames[2][MAX_FILENAME]; /* Retrieve filenames */ - MPI_Recv(filenames, MAX_FILENAME * 2, MPI_CHAR, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &Status); + MPI_Recv(filenames, MAX_FILENAME * 2, MPI_CHAR, 0, MPI_TAG_PARALLEL, MPI_COMM_WORLD, &Status); /* disable error reporting */ H5E_BEGIN_TRY @@ -173,7 +173,7 @@ ph5diff_worker(int nID) /* When get token, send all of our output to the manager task and then return the token */ for (i = 0; i < outBuffOffset; i += PRINT_DATA_MAX_SIZE) - MPI_Send(outBuff + i, PRINT_DATA_MAX_SIZE, MPI_BYTE, 0, MPI_TAG_PRINT_DATA, + MPI_Send(outBuff + i, PRINT_DATA_MAX_SIZE, MPI_CHAR, 0, MPI_TAG_PRINT_DATA, MPI_COMM_WORLD); /* An 
overflow file exists, so we send it's output to the manager too and then delete it */ @@ -188,7 +188,7 @@ ph5diff_worker(int nID) while ((tmp = getc(overflow_file)) >= 0) { *(out_data + i++) = (char)tmp; if (i == PRINT_DATA_MAX_SIZE) { - MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_BYTE, 0, MPI_TAG_PRINT_DATA, + MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_CHAR, 0, MPI_TAG_PRINT_DATA, MPI_COMM_WORLD); i = 0; memset(out_data, 0, PRINT_DATA_MAX_SIZE); @@ -196,7 +196,7 @@ ph5diff_worker(int nID) } if (i > 0) - MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_BYTE, 0, MPI_TAG_PRINT_DATA, + MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_CHAR, 0, MPI_TAG_PRINT_DATA, MPI_COMM_WORLD); fclose(overflow_file); From 8c69566da66e9239bbcd84b10e60068ade6c1784 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Thu, 19 Oct 2023 10:58:20 -0500 Subject: [PATCH 023/101] provide an alternative to mapfile for older bash (#3717) --- utils/subfiling_vfd/h5fuse.sh.in | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/utils/subfiling_vfd/h5fuse.sh.in b/utils/subfiling_vfd/h5fuse.sh.in index 82d497e3f1c..6f4bf619bbe 100755 --- a/utils/subfiling_vfd/h5fuse.sh.in +++ b/utils/subfiling_vfd/h5fuse.sh.in @@ -243,9 +243,15 @@ if test -z "$subfile_dir"; then exit $FAILED fi -# For bash 4.4+ subfs=$(sed -e '1,/subfile_dir=/d' "$file_config") -mapfile -t subfiles <<< "$subfs" +if command -v mapfile > /dev/null; then + # For bash 4.4+ + mapfile -t subfiles <<< "$subfs" +else + while IFS= read -r line; do + subfiles+=("$line") + done <<< "$subfs" +fi if [ ${#subfiles[@]} -eq 0 ]; then echo -e "$RED failed to find subfiles list in $file_config $NC" exit $FAILED From 5672fd817732e6a7fda3886010452bc7c8598cf8 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Thu, 19 Oct 2023 16:40:08 -0500 Subject: [PATCH 024/101] Attempt to quiet some warnings with cray compilers. (#3724) --- fortran/test/tH5D.F90 | 8 +------- fortran/test/tH5E_F03.F90 | 8 +++----- fortran/test/tH5F.F90 | 18 ------------------ fortran/test/tH5G.F90 | 6 +++--- fortran/test/tH5O_F03.F90 | 17 +++++------------ fortran/test/tH5P_F03.F90 | 2 -- fortran/test/tH5Sselect.F90 | 3 --- fortran/test/tH5T.F90 | 2 -- fortran/test/tH5T_F03.F90 | 2 -- fortran/test/tH5Z.F90 | 8 +++----- fortran/test/vol_connector.F90 | 1 - fortran/testpar/async.F90 | 4 ---- 12 files changed, 15 insertions(+), 64 deletions(-) diff --git a/fortran/test/tH5D.F90 b/fortran/test/tH5D.F90 index c38123518fe..fc774144f14 100644 --- a/fortran/test/tH5D.F90 +++ b/fortran/test/tH5D.F90 @@ -38,6 +38,7 @@ MODULE TH5D USE HDF5 ! 
This module contains all necessary modules USE TH5_MISC USE TH5_MISC_GEN + USE ISO_C_BINDING CONTAINS SUBROUTINE datasettest(cleanup, total_error) @@ -514,8 +515,6 @@ END SUBROUTINE extenddsettest SUBROUTINE test_userblock_offset(cleanup, total_error) - USE ISO_C_BINDING - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(OUT) :: total_error @@ -631,8 +630,6 @@ END SUBROUTINE test_userblock_offset SUBROUTINE test_dset_fill(cleanup, total_error) - USE ISO_C_BINDING - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(OUT) :: total_error @@ -640,7 +637,6 @@ SUBROUTINE test_dset_fill(cleanup, total_error) INTEGER, PARAMETER :: DIM0=10 INTEGER, PARAMETER :: int_kind_1 = SELECTED_INT_KIND(2) !should map to INTEGER*1 on most modern processors INTEGER, PARAMETER :: int_kind_4 = SELECTED_INT_KIND(4) !should map to INTEGER*2 on most modern processors - INTEGER, PARAMETER :: int_kind_8 = SELECTED_INT_KIND(9) !should map to INTEGER*4 on most modern processors INTEGER, PARAMETER :: int_kind_16 = SELECTED_INT_KIND(18) !should map to INTEGER*8 on most modern processors INTEGER(KIND=int_kind_1) , DIMENSION(1:DIM0), TARGET :: data_i1 INTEGER(KIND=int_kind_4) , DIMENSION(1:DIM0), TARGET :: data_i4 @@ -991,8 +987,6 @@ END SUBROUTINE test_dset_fill SUBROUTINE test_direct_chunk_io(cleanup, total_error) - USE ISO_C_BINDING - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup diff --git a/fortran/test/tH5E_F03.F90 b/fortran/test/tH5E_F03.F90 index 2d8dd331bfe..c2bf74be061 100644 --- a/fortran/test/tH5E_F03.F90 +++ b/fortran/test/tH5E_F03.F90 @@ -92,17 +92,15 @@ END FUNCTION my_hdf5_error_handler_nodata END MODULE test_my_hdf5_error_handler - - MODULE TH5E_F03 + USE ISO_C_BINDING + USE test_my_hdf5_error_handler + CONTAINS SUBROUTINE test_error(total_error) - USE ISO_C_BINDING - USE test_my_hdf5_error_handler - IMPLICIT NONE INTEGER(hid_t), PARAMETER :: FAKE_ID = -1 diff --git a/fortran/test/tH5F.F90 b/fortran/test/tH5F.F90 index a5b67acac9a..b4d973e6844 100644 --- a/fortran/test/tH5F.F90 +++ b/fortran/test/tH5F.F90 @@ -44,8 +44,6 @@ MODULE TH5F CONTAINS SUBROUTINE h5openclose(total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -141,8 +139,6 @@ SUBROUTINE h5openclose(total_error) END SUBROUTINE h5openclose SUBROUTINE mountingtest(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -502,8 +498,6 @@ END SUBROUTINE mountingtest ! SUBROUTINE reopentest(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -690,8 +684,6 @@ END SUBROUTINE reopentest ! correct output for a given obj_id and filename. ! SUBROUTINE check_get_name(obj_id, fix_filename, len_filename, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE INTEGER(HID_T) :: obj_id ! Object identifier CHARACTER(LEN=80), INTENT(IN) :: fix_filename ! Expected filename @@ -780,8 +772,6 @@ END SUBROUTINE check_get_name ! SUBROUTINE get_name_test(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -846,8 +836,6 @@ END SUBROUTINE get_name_test ! created using the got property lists SUBROUTINE plisttest(cleanup, total_error) - USE HDF5 ! 
This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -946,8 +934,6 @@ END SUBROUTINE plisttest ! SUBROUTINE file_close(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -1075,8 +1061,6 @@ END SUBROUTINE file_close ! SUBROUTINE file_space(filename, cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE CHARACTER(*), INTENT(IN) :: filename LOGICAL, INTENT(IN) :: cleanup @@ -1168,8 +1152,6 @@ END SUBROUTINE file_space ! SUBROUTINE test_file_info(filename, cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE CHARACTER(*), INTENT(IN) :: filename LOGICAL, INTENT(IN) :: cleanup diff --git a/fortran/test/tH5G.F90 b/fortran/test/tH5G.F90 index 83e0101bd3b..36c51df54a1 100644 --- a/fortran/test/tH5G.F90 +++ b/fortran/test/tH5G.F90 @@ -26,6 +26,9 @@ MODULE TH5G + USE HDF5 ! This module contains all necessary modules + USE TH5_MISC + CONTAINS SUBROUTINE group_test(cleanup, total_error) @@ -35,9 +38,6 @@ SUBROUTINE group_test(cleanup, total_error) ! h5glink(2)_f, h5gunlink_f, h5gmove(2)_f, h5gget_linkval_f, h5gset_comment_f, ! h5gget_comment_f - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error diff --git a/fortran/test/tH5O_F03.F90 b/fortran/test/tH5O_F03.F90 index e608a30d65c..b27b0678644 100644 --- a/fortran/test/tH5O_F03.F90 +++ b/fortran/test/tH5O_F03.F90 @@ -267,6 +267,10 @@ END MODULE visit_cb MODULE TH5O_F03 + USE HDF5 + USE TH5_MISC + USE ISO_C_BINDING + CONTAINS !*************************************************************** !** @@ -276,9 +280,6 @@ MODULE TH5O_F03 SUBROUTINE test_h5o_refcount(total_error) - USE HDF5 - USE TH5_MISC - USE ISO_C_BINDING IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -415,11 +416,8 @@ END SUBROUTINE test_h5o_refcount SUBROUTINE test_obj_visit(total_error) - USE HDF5 - USE TH5_MISC - USE visit_cb - USE ISO_C_BINDING + IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -553,9 +551,6 @@ END SUBROUTINE test_obj_visit SUBROUTINE test_obj_info(total_error) - USE HDF5 - USE TH5_MISC - USE ISO_C_BINDING IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -702,8 +697,6 @@ END SUBROUTINE test_obj_info SUBROUTINE build_visit_file(fid) - USE HDF5 - USE TH5_MISC IMPLICIT NONE INTEGER(hid_t) :: fid ! File ID diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90 index 24934eb3e05..c962d52821b 100644 --- a/fortran/test/tH5P_F03.F90 +++ b/fortran/test/tH5P_F03.F90 @@ -439,7 +439,6 @@ END SUBROUTINE test_genprop_class_callback SUBROUTINE test_h5p_file_image(total_error) - USE, INTRINSIC :: iso_c_binding IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error INTEGER(hid_t) :: fapl_1 = -1 @@ -653,7 +652,6 @@ END SUBROUTINE external_test_offset ! SUBROUTINE test_vds(total_error) - USE ISO_C_BINDING IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error diff --git a/fortran/test/tH5Sselect.F90 b/fortran/test/tH5Sselect.F90 index 6dfd7e69f06..b6d28d32e07 100644 --- a/fortran/test/tH5Sselect.F90 +++ b/fortran/test/tH5Sselect.F90 @@ -319,9 +319,6 @@ END SUBROUTINE test_select_hyperslab SUBROUTINE test_select_element(cleanup, total_error) - USE HDF5 ! 
This module contains all necessary modules - USE TH5_MISC - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error diff --git a/fortran/test/tH5T.F90 b/fortran/test/tH5T.F90 index 953d6d07020..a38cbeadf53 100644 --- a/fortran/test/tH5T.F90 +++ b/fortran/test/tH5T.F90 @@ -819,8 +819,6 @@ END SUBROUTINE basic_data_type_test SUBROUTINE enumtest(cleanup, total_error) - USE HDF5 - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup diff --git a/fortran/test/tH5T_F03.F90 b/fortran/test/tH5T_F03.F90 index 661a0cc05f9..39845971644 100644 --- a/fortran/test/tH5T_F03.F90 +++ b/fortran/test/tH5T_F03.F90 @@ -3407,8 +3407,6 @@ SUBROUTINE multiple_dset_rw(total_error) ! Failure: number of errors !------------------------------------------------------------------------- ! - USE iso_c_binding - USE hdf5 IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error ! number of errors diff --git a/fortran/test/tH5Z.F90 b/fortran/test/tH5Z.F90 index 3ac51d6a977..c6ab3832d18 100644 --- a/fortran/test/tH5Z.F90 +++ b/fortran/test/tH5Z.F90 @@ -25,15 +25,15 @@ !***** MODULE TH5Z + USE HDF5 ! This module contains all necessary modules + USE TH5_MISC + CONTAINS SUBROUTINE filters_test(total_error) ! This subroutine tests following functionalities: h5zfilter_avail_f, h5zunregister_f - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC - IMPLICIT NONE INTEGER, INTENT(OUT) :: total_error LOGICAL :: status @@ -164,8 +164,6 @@ SUBROUTINE filters_test(total_error) END SUBROUTINE filters_test SUBROUTINE szip_test(szip_flag, cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(OUT) :: szip_flag diff --git a/fortran/test/vol_connector.F90 b/fortran/test/vol_connector.F90 index e2235f4bbbf..7394a31d6af 100644 --- a/fortran/test/vol_connector.F90 +++ b/fortran/test/vol_connector.F90 @@ -227,7 +227,6 @@ END MODULE VOL_TMOD PROGRAM vol_connector - USE HDF5 USE VOL_TMOD IMPLICIT NONE diff --git a/fortran/testpar/async.F90 b/fortran/testpar/async.F90 index 88ecc3edecd..02045cb7d58 100644 --- a/fortran/testpar/async.F90 +++ b/fortran/testpar/async.F90 @@ -1240,10 +1240,6 @@ END MODULE test_async_APIs ! PROGRAM async_test USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_INT64_T - USE HDF5 - USE MPI - USE TH5_MISC - USE TH5_MISC_GEN USE test_async_APIs IMPLICIT NONE From 5844f498b03c9da5eb99c6665f4a8ff4663b190f Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 19 Oct 2023 16:58:33 -0500 Subject: [PATCH 025/101] Add an option to disable doxygen warn as error (#3708) Add for both CMake and the Autotools * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: ON) * --enable-doxygen-errors: enable/disable (Default: enable) The default will fail compile if the doxygen parsing generates warnings. The option can be disabled if certain versions of doxygen have parsing issues. i.e. 1.9.5, 1.9.8. 
Fixes #3398 --- CMakeLists.txt | 7 +++++++ configure.ac | 25 +++++++++++++++++++++++++ doxygen/CMakeLists.txt | 1 + doxygen/Doxyfile.in | 2 +- release_docs/INSTALL_CMake.txt | 1 + release_docs/RELEASE.txt | 12 ++++++++++++ 6 files changed, 47 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5b76afecd1b..c440c58b27f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -959,6 +959,13 @@ if (HDF5_BUILD_DOC AND EXISTS "${HDF5_DOXYGEN_DIR}" AND IS_DIRECTORY "${HDF5_DOX # check if Doxygen is installed find_package(Doxygen) if (DOXYGEN_FOUND) + option (HDF5_ENABLE_DOXY_WARNINGS "Enable fail if doxygen parsing has warnings." ON) + mark_as_advanced (HDF5_ENABLE_DOXY_WARNINGS) + if (HDF5_ENABLE_DOXY_WARNINGS) + set (HDF5_DOXY_WARNINGS "FAIL_ON_WARNINGS") + else () + set (HDF5_DOXY_WARNINGS "NO") + endif () message(STATUS "Doxygen version: ${DOXYGEN_VERSION}") add_subdirectory (doxygen) else () diff --git a/configure.ac b/configure.ac index 02c47d61002..44ab43a4664 100644 --- a/configure.ac +++ b/configure.ac @@ -1207,6 +1207,30 @@ AC_ARG_ENABLE([doxygen], AC_MSG_RESULT([$HDF5_DOXYGEN]) +## Check if they would like to enable doxygen warnings as errors +## + +## This needs to be exposed for the library info file. +AC_SUBST([HDF5_DOXY_WARNINGS]) + +## Default is to consider doxygen warnings as errors +DOXY_ERR=yes + +AC_MSG_CHECKING([if doxygen warnings as errors is enabled]) + +AC_ARG_ENABLE([doxygen-errors], + [AS_HELP_STRING([--enable-doxygen-errors], + [Error on HDF5 doxygen warnings [default=yes]])], + [DOXY_ERR=$enableval]) + +if test "X$DOXY_ERR" = "Xyes"; then + HDF5_DOXY_WARNINGS="FAIL_ON_WARNINGS" +else + HDF5_DOXY_WARNINGS="NO" + +fi +AC_MSG_RESULT([$HDF5_DOXY_WARNINGS]) + if test "X$HDF5_DOXYGEN" = "Xyes"; then DX_DOXYGEN_FEATURE(ON) DX_DOT_FEATURE(OFF) @@ -1269,6 +1293,7 @@ if test "X$HDF5_DOXYGEN" = "Xyes"; then DOXYGEN_STRIP_FROM_PATH='$(SRCDIR)' DOXYGEN_STRIP_FROM_INC_PATH='$(SRCDIR)' DOXYGEN_PREDEFINED='H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_DOXYGEN H5_HAVE_SUBFILING_VFD H5_HAVE_IOC_VFD H5_HAVE_MIRROR_VFD' + DOXYGEN_WARN_AS_ERROR=${HDF5_DOXY_WARNINGS} DX_INIT_DOXYGEN([HDF5], [./doxygen/Doxyfile], [hdf5lib_docs]) fi diff --git a/doxygen/CMakeLists.txt b/doxygen/CMakeLists.txt index 7bfbe74906a..8fe3b771103 100644 --- a/doxygen/CMakeLists.txt +++ b/doxygen/CMakeLists.txt @@ -30,6 +30,7 @@ if (DOXYGEN_FOUND) set (DOXYGEN_STRIP_FROM_PATH ${HDF5_SOURCE_DIR}) set (DOXYGEN_STRIP_FROM_INC_PATH ${HDF5_SOURCE_DIR}) set (DOXYGEN_PREDEFINED "H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_DOXYGEN H5_HAVE_SUBFILING_VFD H5_HAVE_IOC_VFD H5_HAVE_MIRROR_VFD") + set (DOXYGEN_WARN_AS_ERROR ${HDF5_DOXY_WARNINGS}) # This configure and individual custom targets work together # Replace variables inside @@ with the current values diff --git a/doxygen/Doxyfile.in b/doxygen/Doxyfile.in index b24b9f9e509..464e09a7201 100644 --- a/doxygen/Doxyfile.in +++ b/doxygen/Doxyfile.in @@ -625,7 +625,7 @@ WARN_NO_PARAMDOC = NO # a warning is encountered. # The default value is: NO. -WARN_AS_ERROR = FAIL_ON_WARNINGS +WARN_AS_ERROR = @DOXYGEN_WARN_AS_ERROR@ # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. 
The string should contain the $file, $line, and $text tags, which diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 281c9a18f6c..c837d1c4e02 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -859,6 +859,7 @@ HDF5_ENABLE_ANALYZER_TOOLS "enable the use of Clang tools" HDF5_ENABLE_SANITIZERS "execute the Clang sanitizer" OFF HDF5_ENABLE_FORMATTERS "format source files" OFF HDF5_DIMENSION_SCALES_NEW_REF "Use new-style references with dimension scale APIs" OFF +HDF5_ENABLE_DOXY_WARNINGS "Enable fail if doxygen parsing has warnings." ON ---------------- HDF5 Advanced Test Options --------------------- if (BUILD_TESTING) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 83c20b07d3d..d2411d9a111 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,18 @@ New Features Configuration: ------------- + - Added new options for CMake and Autotools to control the Doxygen + warnings as errors setting. + + * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: ON) + * --enable-doxygen-errors: enable/disable (Default: enable) + + The default will fail compile if the doxygen parsing generates warnings. + The option can be disabled if certain versions of doxygen have parsing + issues. i.e. 1.9.5, 1.9.8. + + Addresses GitHub issue #3398 + - Added support for AOCC and classic Flang w/ the Autotools * Adds a config/clang-fflags options file to support Flang From 11f3804c48766ac9e1ce9f41ed4c940a3e28c0d4 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 19 Oct 2023 16:59:18 -0500 Subject: [PATCH 026/101] Fix CMake VOL passthrough tests by copying files to correct directory (#3721) --- test/CMakePassthroughVOLTests.cmake | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/CMakePassthroughVOLTests.cmake b/test/CMakePassthroughVOLTests.cmake index a5d9b598fb8..853e4b339d6 100644 --- a/test/CMakePassthroughVOLTests.cmake +++ b/test/CMakePassthroughVOLTests.cmake @@ -37,19 +37,19 @@ endforeach () foreach (voltest ${VOL_LIST}) foreach (h5_tfile ${HDF5_TEST_FILES}) - HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/${voltest}/${h5_tfile}" "HDF5_VOLTEST_LIB_files") + HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/${voltest}/testfiles/${h5_tfile}" "HDF5_VOLTEST_LIB_files") endforeach () endforeach () foreach (voltest ${VOL_LIST}) foreach (ref_file ${HDF5_REFERENCE_FILES}) - HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/${voltest}/${ref_file}" "HDF5_VOLTEST_LIB_files") + HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/${voltest}/testfiles/${ref_file}" "HDF5_VOLTEST_LIB_files") endforeach () endforeach () foreach (voltest ${VOL_LIST}) foreach (h5_file ${HDF5_REFERENCE_TEST_FILES}) - HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${voltest}/${h5_file}" "HDF5_VOLTEST_LIB_files") + HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${voltest}/testfiles/${h5_file}" "HDF5_VOLTEST_LIB_files") endforeach () endforeach () From 2bb6619528c1faef3382705f9635f85ad32e6f6d Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 19 Oct 2023 17:17:20 -0500 Subject: [PATCH 027/101] Develop intel split (#3722) * Split intel compiler flags into sub-folders * Update Intel options for warnings * Mostly CMake, Autotools needs additional work 
--- config/cmake/HDFCXXCompilerFlags.cmake | 38 ++++++-- config/cmake/HDFCompilerFlags.cmake | 90 +++++++++++-------- config/cmake/HDFFortranCompilerFlags.cmake | 20 ++++- config/gnu-warnings/developer-general | 8 -- config/intel-cxxflags | 12 +-- config/intel-fflags | 2 +- config/intel-flags | 12 +-- config/intel-warnings/{ => classic}/15 | 0 config/intel-warnings/{ => classic}/18 | 0 .../intel-warnings/classic/developer-general | 3 + config/intel-warnings/{ => classic}/general | 0 .../{ => classic}/ifort-general | 0 .../{ => classic}/win-developer-general | 0 .../intel-warnings/{ => classic}/win-general | 0 .../{ => classic}/win-ifort-general | 0 config/intel-warnings/developer-general | 11 --- .../intel-warnings/oneapi/developer-general | 2 + config/intel-warnings/oneapi/general | 1 + config/intel-warnings/oneapi/ifort-general | 1 + .../oneapi/win-developer-general | 2 + config/intel-warnings/oneapi/win-general | 1 + .../intel-warnings/oneapi/win-ifort-general | 1 + 22 files changed, 125 insertions(+), 79 deletions(-) rename config/intel-warnings/{ => classic}/15 (100%) rename config/intel-warnings/{ => classic}/18 (100%) create mode 100644 config/intel-warnings/classic/developer-general rename config/intel-warnings/{ => classic}/general (100%) rename config/intel-warnings/{ => classic}/ifort-general (100%) rename config/intel-warnings/{ => classic}/win-developer-general (100%) rename config/intel-warnings/{ => classic}/win-general (100%) rename config/intel-warnings/{ => classic}/win-ifort-general (100%) delete mode 100644 config/intel-warnings/developer-general create mode 100644 config/intel-warnings/oneapi/developer-general create mode 100644 config/intel-warnings/oneapi/general create mode 100644 config/intel-warnings/oneapi/ifort-general create mode 100644 config/intel-warnings/oneapi/win-developer-general create mode 100644 config/intel-warnings/oneapi/win-general create mode 100644 config/intel-warnings/oneapi/win-ifort-general diff --git a/config/cmake/HDFCXXCompilerFlags.cmake b/config/cmake/HDFCXXCompilerFlags.cmake index 13f712dd344..dd120c911cb 100644 --- a/config/cmake/HDFCXXCompilerFlags.cmake +++ b/config/cmake/HDFCXXCompilerFlags.cmake @@ -21,7 +21,7 @@ message (VERBOSE "Warnings Configuration: CXX default: ${CMAKE_CXX_FLAGS}") #----------------------------------------------------------------------------- # Compiler specific flags : Shouldn't there be compiler tests for these #----------------------------------------------------------------------------- -if (WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel") +if (WIN32 AND (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM")) set (_INTEL_WINDOWS 1) endif () @@ -130,16 +130,16 @@ else () # warnings that are emitted. If you need it, add it at configure time. 
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") if (_INTEL_WINDOWS) - ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-general") + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-general") else () - ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/general") + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/general") endif() if (NOT _INTEL_WINDOWS) if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15.0) - ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/15") + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/15") endif() if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 18.0) - ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/18") + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/18") endif() endif() elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") @@ -149,7 +149,13 @@ else () ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-general") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-error-general") endif () - elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-general") + else () + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/general") + endif() + elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/general") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "PGI") list (APPEND HDF5_CMAKE_CXX_FLAGS "-Minform=inform") @@ -164,18 +170,28 @@ endif () if (HDF5_ENABLE_DEV_WARNINGS) message (STATUS "....HDF5 developer group warnings are enabled") if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/developer-general") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-developer-general") + else () + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/developer-general") + endif() elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Use the C warnings as CXX warnings are the same ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/developer-general") - elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-developer-general") + else () + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/developer-general") + endif() + elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general") endif () else () if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Use the C warnings as CXX warnings are the same ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general") - elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/no-developer-general") endif () endif () @@ -307,6 +323,8 @@ if (HDF5_ENABLE_SYMBOLS MATCHES "YES") if 
(CMAKE_CXX_COMPILER_LOADED) if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND NOT _INTEL_WINDOWS) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" AND NOT _INTEL_WINDOWS) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") endif () @@ -315,6 +333,8 @@ elseif (HDF5_ENABLE_SYMBOLS MATCHES "NO") if (CMAKE_CXX_COMPILER_LOADED) if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND NOT _INTEL_WINDOWS) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-s") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" AND NOT _INTEL_WINDOWS) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-s") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s") endif () diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index a6bce982849..e7b9337f39c 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -9,8 +9,8 @@ # If you do not have access to either file, you may request a copy from # help@hdfgroup.org. # -set(CMAKE_C_STANDARD 99) -set(CMAKE_C_STANDARD_REQUIRED TRUE) +set (CMAKE_C_STANDARD 99) +set (CMAKE_C_STANDARD_REQUIRED TRUE) set (CMAKE_C_FLAGS "${CMAKE_C99_STANDARD_COMPILE_OPTION} ${CMAKE_C_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_SANITIZER_FLAGS} ${CMAKE_C_FLAGS}") @@ -18,45 +18,45 @@ message (VERBOSE "Warnings Configuration: C default: ${CMAKE_C_FLAGS}") #----------------------------------------------------------------------------- # Compiler specific flags : Shouldn't there be compiler tests for these #----------------------------------------------------------------------------- -if(WIN32 AND CMAKE_C_COMPILER_ID STREQUAL "Intel") - set(_INTEL_WINDOWS 1) -endif() +if (WIN32 AND (CMAKE_C_COMPILER_ID STREQUAL "Intel" OR CMAKE_C_COMPILER_ID MATCHES "IntelLLVM")) + set (_INTEL_WINDOWS 1) +endif () -if(WIN32 AND CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" AND "x${CMAKE_C_SIMULATE_ID}" STREQUAL "xMSVC") - set(_CLANG_MSVC_WINDOWS 1) -endif() +if (WIN32 AND CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" AND "x${CMAKE_C_SIMULATE_ID}" STREQUAL "xMSVC") + set (_CLANG_MSVC_WINDOWS 1) +endif () # Disable deprecation warnings for standard C functions. # really only needed for newer versions of VS, but should # not hurt other versions, and this will work into the # future -if(MSVC OR _INTEL_WINDOWS OR _CLANG_MSVC_WINDOWS) - add_definitions(-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) -endif() +if (MSVC OR _INTEL_WINDOWS OR _CLANG_MSVC_WINDOWS) + add_definitions (-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) +endif () -if(MSVC) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stack:10000000") -endif() +if (MSVC) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stack:10000000") +endif () # MSVC 14.28 enables C5105, but the Windows SDK 10.0.18362.0 triggers it. 
-if(CMAKE_C_COMPILER_ID STREQUAL "MSVC" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 19.28) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd5105") -endif() +if (CMAKE_C_COMPILER_ID STREQUAL "MSVC" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 19.28) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd5105") +endif () if(_CLANG_MSVC_WINDOWS AND "x${CMAKE_C_COMPILER_FRONTEND_VARIANT}" STREQUAL "xGNU") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Xlinker -stack:20000000") endif() -if(CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Minform=warn") +if (CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Minform=warn") if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s") endif () else () - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g") endif () -endif() +endif () if (CMAKE_COMPILER_IS_GNUCC) set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") @@ -146,20 +146,20 @@ else () # warnings that are emitted. If you need it, add it at configure time. if (CMAKE_C_COMPILER_ID STREQUAL "Intel") if (_INTEL_WINDOWS) - ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-general") + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-general") else () - ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/general") + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/general") endif() if (NOT _INTEL_WINDOWS) - if(NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 15.0) - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/15") - endif() + if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 15.0) + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/15") + endif () # this is just a failsafe list (APPEND H5_CFLAGS "-finline-functions") - if(NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 18.0) - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/18") - endif() - endif() + if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 18.0) + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/18") + endif () + endif () elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU") # Add general CFlags for GCC versions 4.8 and above if (CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) @@ -169,7 +169,15 @@ else () # gcc automatically inlines based on the optimization level # this is just a failsafe list (APPEND H5_CFLAGS "-finline-functions") - elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-general") + else () + # this is just a failsafe + list (APPEND H5_CFLAGS "-finline-functions") + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/general") + endif () + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/general") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/error-general") elseif (CMAKE_C_COMPILER_ID STREQUAL "PGI") @@ -191,13 +199,19 @@ if (HDF5_ENABLE_DEV_WARNINGS) message (STATUS "....HDF5 developer group warnings are enabled") if (CMAKE_C_COMPILER_ID STREQUAL "Intel") if 
(_INTEL_WINDOWS) - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-developer-general") + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-developer-general") else () - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/developer-general") + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/developer-general") endif () elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/developer-general") - elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-developer-general") + else () + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/developer-general") + endif () + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general") endif () @@ -211,12 +225,16 @@ if (HDF5_ENABLE_DEV_WARNINGS) list (APPEND H5_CFLAGS "-Winline") elseif (CMAKE_C_COMPILER_ID STREQUAL "Intel" AND NOT _INTEL_WINDOWS) list (APPEND H5_CFLAGS "-Winline") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" AND NOT _INTEL_WINDOWS) + list (APPEND H5_CFLAGS "-Winline") + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + list (APPEND H5_CFLAGS "-Winline") endif () endif () else () if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general") - elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/no-developer-general") endif () endif () diff --git a/config/cmake/HDFFortranCompilerFlags.cmake b/config/cmake/HDFFortranCompilerFlags.cmake index f207c7062c5..8ac3f490cc3 100644 --- a/config/cmake/HDFFortranCompilerFlags.cmake +++ b/config/cmake/HDFFortranCompilerFlags.cmake @@ -23,6 +23,8 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) set (HDF5_WARNINGS_BLOCKED 1) if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} /warn:none") + elseif (CMAKE_Fortran_COMPILER_ID MATCHES "IntelLLVM") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} /warn:none") endif () endif () if (WIN32) @@ -70,7 +72,11 @@ endif () if (NOT MSVC AND NOT MINGW) # General flags if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") - ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/ifort-general") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-ifort-general") + else () + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/ifort-general") + endif() list (APPEND HDF5_CMAKE_Fortran_FLAGS "-free") elseif (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-general") @@ -85,6 +91,13 @@ if (NOT MSVC AND NOT MINGW) else () list (APPEND HDF5_CMAKE_Fortran_FLAGS "-std=f2008") endif () + elseif (CMAKE_Fortran_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-ifort-general") + else () + ADD_H5_FLAGS 
(HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/ifort-general") + endif() + list (APPEND HDF5_CMAKE_Fortran_FLAGS "-free") elseif (CMAKE_Fortran_COMPILER_ID STREQUAL "PGI") list (APPEND HDF5_CMAKE_Fortran_FLAGS "-Mfreeform" "-Mdclchk" "-Mstandard" "-Mallocatable=03") endif () @@ -135,7 +148,10 @@ if (NOT MSVC AND NOT MINGW) endif () else () if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") - ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-ifort-general") + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-ifort-general") + list (APPEND HDF5_CMAKE_Fortran_FLAGS "/stand:f03" "/free") + elseif (CMAKE_Fortran_COMPILER_ID MATCHES "IntelLLVM") + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-ifort-general") list (APPEND HDF5_CMAKE_Fortran_FLAGS "/stand:f03" "/free") endif () endif () diff --git a/config/gnu-warnings/developer-general b/config/gnu-warnings/developer-general index 79ecd6a054b..af701725200 100644 --- a/config/gnu-warnings/developer-general +++ b/config/gnu-warnings/developer-general @@ -7,11 +7,3 @@ -Wswitch-enum -Wunsafe-loop-optimizations -Wunused-macros -# -Winline warnings aren't included here because, for at least -# GNU compilers, this flag appears to conflict specifically with -# the -Og optimization level flag added for Debug and Developer -# builds and will produce warnings about functions not being -# considered for inlining. The flag will be added to the list -# of compiler flags separately if developer warnings are enabled -# and the build type is not Debug or Developer -#-Winline diff --git a/config/intel-cxxflags b/config/intel-cxxflags index 107b08757a9..40a3f0e9d34 100644 --- a/config/intel-cxxflags +++ b/config/intel-cxxflags @@ -129,15 +129,15 @@ if test "X-icpc" = "X-$cxx_vendor"; then # Add various general warning flags in intel-warnings. 
# Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments general)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/general)" ###################### # Developer warnings # ###################### # Use the C warnings as CXX warnings are the same - #NO_DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments no-developer-general) - #DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments developer-general) + #NO_DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments classic/no-developer-general) + #DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments classic/developer-general) ############################# # Version-specific warnings # @@ -157,19 +157,19 @@ if test "X-icpc" = "X-$cxx_vendor"; then # intel >= 15 if test $cxx_vers_major -ge 15; then # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments 15)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/15)" fi # intel >= 18 if test $cxx_vers_major -ge 18; then # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments 18)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/18)" fi # intel <= 19 if test $cxx_vers_major -le 19; then # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments general-19)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/general-19)" fi ################# diff --git a/config/intel-fflags b/config/intel-fflags index ad1ce7c4bb5..b6307c1bedb 100644 --- a/config/intel-fflags +++ b/config/intel-fflags @@ -123,7 +123,7 @@ if test "X-ifort" = "X-$f9x_vendor"; then ########### H5_FCFLAGS="$H5_FCFLAGS -free" - H5_FCFLAGS="$H5_FCFLAGS $(load_intel_arguments ifort-general)" + H5_FCFLAGS="$H5_FCFLAGS $(load_intel_arguments classic/ifort-general)" ############################# # Version-specific warnings # diff --git a/config/intel-flags b/config/intel-flags index fbec7efdf6b..134452cc11d 100644 --- a/config/intel-flags +++ b/config/intel-flags @@ -127,14 +127,14 @@ if test "X-icc" = "X-$cc_vendor"; then ########### # Add various general warning flags in intel-warnings. 
- H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments general)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/general)" ###################### # Developer warnings # ###################### - #NO_DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments no-developer-general) - #DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments developer-general) + #NO_DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments classic/no-developer-general) + #DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments classic/developer-general) ############################# # Version-specific warnings # @@ -153,18 +153,18 @@ if test "X-icc" = "X-$cc_vendor"; then # intel >= 15 if test $cc_vers_major -ge 15; then - H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments 15)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/15)" fi # intel >= 18 if test $cc_vers_major -ge 18; then - H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments 18)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/18)" fi # intel <= 19 # this file has warnings only available before oneapi versions if test $cc_vers_major -le 19; then - H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments general-19)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/general-19)" fi ################# diff --git a/config/intel-warnings/15 b/config/intel-warnings/classic/15 similarity index 100% rename from config/intel-warnings/15 rename to config/intel-warnings/classic/15 diff --git a/config/intel-warnings/18 b/config/intel-warnings/classic/18 similarity index 100% rename from config/intel-warnings/18 rename to config/intel-warnings/classic/18 diff --git a/config/intel-warnings/classic/developer-general b/config/intel-warnings/classic/developer-general new file mode 100644 index 00000000000..6f4e9e9f4b8 --- /dev/null +++ b/config/intel-warnings/classic/developer-general @@ -0,0 +1,3 @@ +-Wreorder +-Wport +-Wstrict-aliasing diff --git a/config/intel-warnings/general b/config/intel-warnings/classic/general similarity index 100% rename from config/intel-warnings/general rename to config/intel-warnings/classic/general diff --git a/config/intel-warnings/ifort-general b/config/intel-warnings/classic/ifort-general similarity index 100% rename from config/intel-warnings/ifort-general rename to config/intel-warnings/classic/ifort-general diff --git a/config/intel-warnings/win-developer-general b/config/intel-warnings/classic/win-developer-general similarity index 100% rename from config/intel-warnings/win-developer-general rename to config/intel-warnings/classic/win-developer-general diff --git a/config/intel-warnings/win-general b/config/intel-warnings/classic/win-general similarity index 100% rename from config/intel-warnings/win-general rename to config/intel-warnings/classic/win-general diff --git a/config/intel-warnings/win-ifort-general b/config/intel-warnings/classic/win-ifort-general similarity index 100% rename from config/intel-warnings/win-ifort-general rename to config/intel-warnings/classic/win-ifort-general diff --git a/config/intel-warnings/developer-general b/config/intel-warnings/developer-general deleted file mode 100644 index 861218eecb9..00000000000 --- a/config/intel-warnings/developer-general +++ /dev/null @@ -1,11 +0,0 @@ --Wreorder --Wport --Wstrict-aliasing -# -Winline warnings aren't included here because, for at least -# GNU compilers, this flag appears to conflict specifically with -# the -Og optimization level flag added for Debug and Developer -# builds and will produce warnings about functions not being -# considered for inlining. 
The flag will be added to the list -# of compiler flags separately if developer warnings are enabled -# and the build type is not Debug or Developer -#-Winline diff --git a/config/intel-warnings/oneapi/developer-general b/config/intel-warnings/oneapi/developer-general new file mode 100644 index 00000000000..122c33d14fa --- /dev/null +++ b/config/intel-warnings/oneapi/developer-general @@ -0,0 +1,2 @@ +-Wreorder +-Wstrict-aliasing diff --git a/config/intel-warnings/oneapi/general b/config/intel-warnings/oneapi/general new file mode 100644 index 00000000000..bd866b6966d --- /dev/null +++ b/config/intel-warnings/oneapi/general @@ -0,0 +1 @@ +-Wall diff --git a/config/intel-warnings/oneapi/ifort-general b/config/intel-warnings/oneapi/ifort-general new file mode 100644 index 00000000000..1644c7cb82f --- /dev/null +++ b/config/intel-warnings/oneapi/ifort-general @@ -0,0 +1 @@ +-warn all diff --git a/config/intel-warnings/oneapi/win-developer-general b/config/intel-warnings/oneapi/win-developer-general new file mode 100644 index 00000000000..ba86a0f1916 --- /dev/null +++ b/config/intel-warnings/oneapi/win-developer-general @@ -0,0 +1,2 @@ +/Wreorder +/Wstrict-aliasing diff --git a/config/intel-warnings/oneapi/win-general b/config/intel-warnings/oneapi/win-general new file mode 100644 index 00000000000..ef54b2b6116 --- /dev/null +++ b/config/intel-warnings/oneapi/win-general @@ -0,0 +1 @@ +/Wall diff --git a/config/intel-warnings/oneapi/win-ifort-general b/config/intel-warnings/oneapi/win-ifort-general new file mode 100644 index 00000000000..a3359590c74 --- /dev/null +++ b/config/intel-warnings/oneapi/win-ifort-general @@ -0,0 +1 @@ +/warn:all From af56339d3bb0ba0076c10f929472f766c9a9a5af Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 19 Oct 2023 17:23:59 -0500 Subject: [PATCH 028/101] Fixes and adjustments to t_filters_parallel (#3714) Broadcast number of datasets to create in multi-dataset I/O cases so that interference with random number generation doesn't cause mismatches between ranks Set fill time for datasets to "never" by default and adjust on a per-test basis to avoid writing fill values to chunks when it's unnecessary Reduce number of loops run in some tests when performing multi-dataset I/O Fix an issue in the "fill time never" test where data verification could fail if file space reuse causes application buffers to be filled with chosen fill value when reading from datasets with uninitialized storage Skip multi-chunk I/O test configurations for multi-dataset I/O configurations when the TestExpress level is > 1 since those tests can be more stressful on the file system Disable use of persistent file free space management for now since it occasionally runs into an infinite loop in the library's free space management code --- testpar/t_filters_parallel.c | 231 +++++++++++++++++++++++------------ testpar/t_filters_parallel.h | 2 +- 2 files changed, 152 insertions(+), 81 deletions(-) diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index bdfde0972b5..7dfb8bc93b4 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -576,11 +576,21 @@ create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t case USE_MULTIPLE_DATASETS: case USE_MULTIPLE_DATASETS_MIXED_FILTERED: dset_name_ptr = dset_name_multi_buf; - n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2; + + if (MAINPROCESS) + n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2; + + if (mpi_size > 1) + VRFY((MPI_SUCCESS == MPI_Bcast(&n_dsets, 1, MPI_INT, 0, comm)),
"MPI_Bcast succeeded"); /* Select between 1 and (n_dsets - 1) datasets to be unfiltered */ if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) { - n_unfiltered = (rand() % (n_dsets - 1)) + 1; + if (MAINPROCESS) + n_unfiltered = (rand() % (n_dsets - 1)) + 1; + + if (mpi_size > 1) + VRFY((MPI_SUCCESS == MPI_Bcast(&n_unfiltered, 1, MPI_INT, 0, comm)), + "MPI_Bcast succeeded"); unfiltered_dcpl = H5Pcopy(dcpl_id); VRFY((unfiltered_dcpl >= 0), "H5Pcopy succeeded"); @@ -621,7 +631,11 @@ create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t * remaining datasets as unfiltered datasets. Otherwise, * randomly determine if a dataset will be unfiltered. */ - unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0); + if (MAINPROCESS) + unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0); + + if (mpi_size > 1) + VRFY((MPI_SUCCESS == MPI_Bcast(&unfiltered, 1, MPI_C_BOOL, 0, comm)), "MPI_Bcast succeeded"); if (unfiltered) { curr_dcpl = unfiltered_dcpl; @@ -1201,6 +1215,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); @@ -1485,6 +1505,7 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -1551,7 +1572,12 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group read_bufs[dset_idx] = tmp_buf; } - for (size_t i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ @@ -1590,7 +1616,7 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* Extend the dataset(s) by count[1] chunks in the extensible dimension */ dataset_dims[1] += count[1] * block[1]; @@ -1646,6 +1672,7 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -1712,7 +1739,12 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H read_bufs[dset_idx] = tmp_buf; } - for (size_t i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; + if ((test_mode 
== USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ @@ -1750,7 +1782,7 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* Extend the dataset(s) by count[1] chunks in the extensible dimension */ dataset_dims[1] += count[1] * block[1]; @@ -1808,6 +1840,7 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -1855,7 +1888,12 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - for (size_t i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Set selected dimensions */ sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; @@ -1916,7 +1954,7 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* * Extend the dataset(s) by the size of one chunk per rank * in the first extensible dimension. 
Extend the dataset(s) @@ -1977,6 +2015,7 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -2024,7 +2063,12 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - for (size_t i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Set selected dimensions */ sel_dims[0] = (i + 1); sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; @@ -2085,7 +2129,7 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* Extend the dataset(s) by the size of a chunk in each extensible dimension */ dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; @@ -2177,6 +2221,12 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); @@ -2341,6 +2391,12 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* + * Since we're doing a no-op write to the dataset, + * make sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); @@ -5250,7 +5306,6 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; size_t data_size, read_buf_size; size_t num_dsets; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; @@ -5368,8 +5423,6 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - sel_dims[0] = sel_dims[1] = 0; - select_none(num_dsets, dset_ids, fspace_ids); read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); @@ -8275,6 +8328,7 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t 
num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -8352,7 +8406,12 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, read_bufs[dset_idx] = tmp_buf; } - for (size_t i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = SHRINKING_GROWING_CHUNKS_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { /* Continually write random float data, followed by zeroed-out data */ if (i % 2) @@ -8544,6 +8603,12 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); /* Create datasets depending on the current test mode */ @@ -8748,6 +8813,12 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); /* Create datasets depending on the current test mode */ @@ -8870,6 +8941,9 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* Make sure the fill time is set appropriately */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, FILL_VALUES_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); /* Add test filter to the pipeline */ @@ -9231,6 +9305,9 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* Make sure the fill time is set appropriately */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); /* Add test filter to the pipeline */ @@ -9449,12 +9526,10 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap size_t num_dsets; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID; - int *recvcounts = NULL; - int *displs = NULL; - int mpi_code; + hid_t file_id = H5I_INVALID_HID; + hid_t plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing fill time H5D_FILL_TIME_NEVER"); @@ -9504,7 +9579,7 @@ test_fill_time_never(const char 
*parent_group, H5Z_filter_t filter_id, hid_t fap VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); /* Set a fill value */ - fill_value = FILL_VALUES_TEST_FILL_VAL; + fill_value = FILL_TIME_NEVER_TEST_FILL_VAL; VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set"); /* Set fill time of 'never' */ @@ -9519,6 +9594,21 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + /* Allocate buffer for reading entire dataset */ + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } + + /* Allocate buffer of fill values */ + fill_buf = calloc(1, read_buf_size); + VRFY((NULL != fill_buf), "calloc succeeded"); + + for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++) + fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL; + /* * Since we aren't writing fill values to the chunks of the * datasets we just created, close and re-open file to ensure @@ -9538,37 +9628,21 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - /* Allocate buffer for reading entire dataset */ - read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { - read_bufs[dset_idx] = calloc(1, read_buf_size); - VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); - } - - fill_buf = calloc(1, read_buf_size); - VRFY((NULL != fill_buf), "calloc succeeded"); - - /* Read entire dataset and verify that the fill value isn't returned */ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - - for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++) - fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL; - /* - * It should be very unlikely for the dataset's random - * values to all be the fill value, so this should be - * a safe comparison in theory. + * Read entire dataset just to try to verify bad behavior doesn't + * occur. Don't attempt to verify the contents of the read buffer(s) + * yet, because there's no guarantee as to what may have been + * read from the dataset. */ - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) - VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); /* * Write to part of the first chunk in the dataset with - * all ranks, then read the whole dataset and ensure that - * the fill value isn't returned for the unwritten part of - * the chunk, as well as for the rest of the dataset that - * hasn't been written to yet. + * all ranks, then read the whole dataset just to try to + * verify bad behavior doesn't occur. Don't attempt to + * verify the contents of the read buffer(s) yet, because + * there's no guarantee as to what may have been read from + * the dataset. 
*/ count[0] = 1; count[1] = 1; @@ -9609,34 +9683,6 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - /* - * Each MPI rank communicates their written piece of data - * into each other rank's correctness-checking buffer - */ - recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); - - displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); - - for (size_t i = 0; i < (size_t)mpi_size; i++) { - recvcounts[i] = (int)(count[1] * block[1]); - displs[i] = (int)(i * dataset_dims[1]); - } - - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { - mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, - recvcounts, displs, C_DATATYPE_MPI, comm); - VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - - /* - * It should be very unlikely for the dataset's random - * values to all be the fill value, so this should be - * a safe comparison in theory. - */ - VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded"); - } - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); @@ -9680,9 +9726,6 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap VRFY((tmp_buf[j] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded"); } - free(displs); - free(recvcounts); - free(fill_buf); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { @@ -9827,7 +9870,13 @@ main(int argc, char **argv) fcpl_id = H5Pcreate(H5P_FILE_CREATE); VRFY((fcpl_id >= 0), "FCPL creation succeeded"); - VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, 1) >= 0), + /* + * TODO: Ideally, use persistent free space management. However, + * this occasionally runs into an infinite loop in the library's + * free space management code, so don't persist free space for now + * until that is fixed. + */ + VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, false, 1) >= 0), "H5Pset_file_space_strategy succeeded"); VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL), @@ -9956,6 +10005,19 @@ main(int argc, char **argv) continue; } + /* + * If TestExpress is > 1, only run the multi-chunk I/O + * configuration tests for the 'USE_SINGLE_DATASET' case, + * as the 'USE_MULTIPLE_DATASETS' and 'USE_MULTIPLE_DATASETS_MIXED_FILTERED' + * cases are more stressful on the file system. + */ + if (test_express_level_g > 1) { + if (((test_mode == USE_MULTIPLE_DATASETS) || + (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) && + (chunk_opt != H5FD_MPIO_CHUNK_ONE_IO)) + continue; + } + if (MAINPROCESS) { printf("== Running tests in mode '%s' with filter '%s' using selection I/O mode " "'%s', '%s' and '%s' allocation time ==\n\n", @@ -9992,6 +10054,15 @@ main(int argc, char **argv) VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded"); + /* + * Disable writing of fill values by default. Otherwise, a + * lot of time may be spent writing fill values to chunks + * when they're going to be fully overwritten anyway. + * Individual tests will alter this behavior as necessary. 
+ */ + VRFY((H5Pset_fill_time(dcpl_id, H5D_FILL_TIME_NEVER) >= 0), + "H5Pset_fill_time succeeded"); + /* Create a group to hold all the datasets for this combination * of filter and chunk optimization mode. Then, close the file * again since some tests may need to open the file in a special diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h index c0b1db878f9..04d36395dbc 100644 --- a/testpar/t_filters_parallel.h +++ b/testpar/t_filters_parallel.h @@ -444,7 +444,7 @@ typedef struct { #define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) #define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size) #define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size) -#define SHRINKING_GROWING_CHUNKS_NLOOPS 20 +#define SHRINKING_GROWING_CHUNKS_NLOOPS 8 /* Defines for the unshared filtered edge chunks write test */ #define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "unshared_filtered_edge_chunks_write" From 97a6efbf8a648840c2acce4a7ad3e09bac47ecd0 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 19 Oct 2023 17:24:50 -0500 Subject: [PATCH 029/101] Organize CMake config features like libsettings (#3725) --- config/cmake/hdf5-config.cmake.in | 49 +++++++++++++++++++------------ 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index e5bd2406735..c20e18f54a4 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -32,29 +32,40 @@ set (${HDF5_PACKAGE_NAME}_VALID_COMPONENTS #----------------------------------------------------------------------------- # User Options #----------------------------------------------------------------------------- -set (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL @HDF5_ENABLE_PARALLEL@) -set (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN @HDF5_BUILD_FORTRAN@) -set (${HDF5_PACKAGE_NAME}_BUILD_CPP_LIB @HDF5_BUILD_CPP_LIB@) -set (${HDF5_PACKAGE_NAME}_BUILD_JAVA @HDF5_BUILD_JAVA@) -set (${HDF5_PACKAGE_NAME}_BUILD_TOOLS @HDF5_BUILD_TOOLS@) -set (${HDF5_PACKAGE_NAME}_BUILD_HL_LIB @HDF5_BUILD_HL_LIB@) -set (${HDF5_PACKAGE_NAME}_BUILD_HL_GIF_TOOLS @HDF5_BUILD_HL_GIF_TOOLS@) -set (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE @HDF5_ENABLE_THREADSAFE@) +# Languages: +set (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN @HDF5_BUILD_FORTRAN@) +set (${HDF5_PACKAGE_NAME}_BUILD_CPP_LIB @HDF5_BUILD_CPP_LIB@) +set (${HDF5_PACKAGE_NAME}_BUILD_JAVA @HDF5_BUILD_JAVA@) +set (${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN "@HDF5_INSTALL_MOD_FORTRAN@") +#----------------------------------------------------------------------------- +# Features: +set (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL @HDF5_ENABLE_PARALLEL@) +set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES @PARALLEL_FILTERED_WRITES@) +set (${HDF5_PACKAGE_NAME}_LARGE_PARALLEL_IO @LARGE_PARALLEL_IO@) +set (${HDF5_PACKAGE_NAME}_BUILD_HL_LIB @HDF5_BUILD_HL_LIB@) +set (${HDF5_PACKAGE_NAME}_BUILD_DIMENSION_SCALES_WITH_NEW_REF @DIMENSION_SCALES_WITH_NEW_REF@) +set (${HDF5_PACKAGE_NAME}_BUILD_TOOLS @HDF5_BUILD_TOOLS@) +set (${HDF5_PACKAGE_NAME}_BUILD_HL_GIF_TOOLS @HDF5_BUILD_HL_GIF_TOOLS@) +set (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE @HDF5_ENABLE_THREADSAFE@) +set (${HDF5_PACKAGE_NAME}_DEFAULT_API_VERSION "@DEFAULT_API_VERSION@") +set (${HDF5_PACKAGE_NAME}_ENABLE_DEPRECATED_SYMBOLS @HDF5_ENABLE_DEPRECATED_SYMBOLS@) +set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@) +set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT 
@HDF5_ENABLE_SZIP_SUPPORT@) +set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@) +set (${HDF5_PACKAGE_NAME}_ENABLE_MAP_API @H5_HAVE_MAP_API@) +set (${HDF5_PACKAGE_NAME}_ENABLE_DIRECT_VFD @H5_HAVE_DIRECT@) +set (${HDF5_PACKAGE_NAME}_ENABLE_MIRROR_VFD @H5_HAVE_MIRROR_VFD@) +set (${HDF5_PACKAGE_NAME}_ENABLE_SUBFILING_VFD @HDF5_ENABLE_SUBFILING_VFD@) +set (${HDF5_PACKAGE_NAME}_ENABLE_ROS3_VFD @HDF5_ENABLE_ROS3_VFD@) +set (${HDF5_PACKAGE_NAME}_ENABLE_HDFS_VFD @H5_HAVE_LIBHDFS@) set (${HDF5_PACKAGE_NAME}_ENABLE_PLUGIN_SUPPORT @HDF5_ENABLE_PLUGIN_SUPPORT@) -set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@) -set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT @HDF5_ENABLE_SZIP_SUPPORT@) -set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@) -set (${HDF5_PACKAGE_NAME}_ENABLE_ROS3_VFD @HDF5_ENABLE_ROS3_VFD@) -set (${HDF5_PACKAGE_NAME}_ENABLE_SUBFILING_VFD @HDF5_ENABLE_SUBFILING_VFD@) +#----------------------------------------------------------------------------- set (${HDF5_PACKAGE_NAME}_BUILD_SHARED_LIBS @H5_ENABLE_SHARED_LIB@) set (${HDF5_PACKAGE_NAME}_BUILD_STATIC_LIBS @H5_ENABLE_STATIC_LIB@) set (${HDF5_PACKAGE_NAME}_PACKAGE_EXTLIBS @HDF5_PACKAGE_EXTLIBS@) -set (${HDF5_PACKAGE_NAME}_EXPORT_LIBRARIES @HDF5_LIBRARIES_TO_EXPORT@) -set (${HDF5_PACKAGE_NAME}_ARCHITECTURE "@CMAKE_GENERATOR_ARCHITECTURE@") -set (${HDF5_PACKAGE_NAME}_TOOLSET "@CMAKE_GENERATOR_TOOLSET@") -set (${HDF5_PACKAGE_NAME}_DEFAULT_API_VERSION "@DEFAULT_API_VERSION@") -set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES @PARALLEL_FILTERED_WRITES@) -set (${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN "@HDF5_INSTALL_MOD_FORTRAN@") +set (${HDF5_PACKAGE_NAME}_EXPORT_LIBRARIES @HDF5_LIBRARIES_TO_EXPORT@) +set (${HDF5_PACKAGE_NAME}_ARCHITECTURE "@CMAKE_GENERATOR_ARCHITECTURE@") +set (${HDF5_PACKAGE_NAME}_TOOLSET "@CMAKE_GENERATOR_TOOLSET@") #----------------------------------------------------------------------------- # Dependencies From b916ce2419da45f7a51503e3bf2774e71f7db815 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Thu, 19 Oct 2023 21:19:52 -0700 Subject: [PATCH 030/101] Suppress cast-qual warning in H5TB Fortran wrapper (#3728) This interface is fundamentally broken, const-wise. --- hl/fortran/src/H5TBfc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hl/fortran/src/H5TBfc.c b/hl/fortran/src/H5TBfc.c index 9c257777b2f..d339def6f66 100644 --- a/hl/fortran/src/H5TBfc.c +++ b/hl/fortran/src/H5TBfc.c @@ -92,10 +92,12 @@ h5tbmake_table_c(size_t_f *namelen1, _fcd name1, hid_t_f *loc_id, size_t_f *name /* * call H5TBmake_table function. */ + H5_GCC_CLANG_DIAG_OFF("cast-qual") if (H5TBmake_table(c_name1, (hid_t)*loc_id, c_name, c_nfields, (hsize_t)*nrecords, (size_t)*type_size, (const char **)c_field_names, c_field_offset, c_field_types, (hsize_t)*chunk_size, NULL, *compress, NULL) < 0) HGOTO_DONE(FAIL); + H5_GCC_CLANG_DIAG_ON("cast-qual") done: if (c_name) @@ -193,10 +195,12 @@ h5tbmake_table_ptr_c(size_t_f *namelen1, _fcd name1, hid_t_f *loc_id, size_t_f * /* * call H5TBmake_table function. 
*/ + H5_GCC_CLANG_DIAG_OFF("cast-qual") if (H5TBmake_table(c_name1, (hid_t)*loc_id, c_name, c_nfields, (hsize_t)*nrecords, (size_t)*type_size, (const char **)c_field_names, c_field_offset, c_field_types, (hsize_t)*chunk_size, fill_data, *compress, data) < 0) HGOTO_DONE(FAIL); + H5_GCC_CLANG_DIAG_ON("cast-qual") done: if (c_name) From 630d6e27c956859ff5c0d7a61df3c095fbd7c86b Mon Sep 17 00:00:00 2001 From: Neil Fortner Date: Fri, 20 Oct 2023 12:32:17 -0500 Subject: [PATCH 031/101] Add new API function H5Pget_actual_select_io_mode() (#2974) This function allows the user to determine if the library performed selection I/O, vector I/O, or scalar (legacy) I/O during the last HDF5 operation performed with the provided DXPL. Expanded existing tests to check this functionality. --- doxygen/examples/tables/propertyLists.dox | 4 + release_docs/RELEASE.txt | 6 + src/H5CX.c | 101 +++- src/H5CXprivate.h | 2 + src/H5Dprivate.h | 19 +- src/H5FDint.c | 155 +++++- src/H5M.c | 21 - src/H5Pdxpl.c | 46 +- src/H5Ppublic.h | 64 ++- test/select_io_dset.c | 597 +++++++++++--------- testpar/t_filters_parallel.c | 614 +++++++++++++-------- testpar/t_select_io_dset.c | 641 +++++++++++++--------- 12 files changed, 1472 insertions(+), 798 deletions(-) diff --git a/doxygen/examples/tables/propertyLists.dox b/doxygen/examples/tables/propertyLists.dox index 2f74c03770f..340e13c26a5 100644 --- a/doxygen/examples/tables/propertyLists.dox +++ b/doxygen/examples/tables/propertyLists.dox @@ -711,6 +711,10 @@ of the library for reading or writing the actual data. Gets the cause for not performing selection or vector I/O on the last parallel I/O call. +#H5Pget_actual_selection_io_mode +Gets the type(s) (scalar, vector, selection) of raw data I/O performed on the last I/O call. + + #H5Pset_modify_write_buf/#H5Pget_modify_write_buf Sets/gets a flag allowing the library to modify the contents of the write buffer. diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index d2411d9a111..0239a9e356f 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -228,6 +228,12 @@ New Features Library: -------- + - Added new API function H5Pget_actual_selection_io_mode() + + This function allows the user to determine if the library performed + selection I/O, vector I/O, or scalar (legacy) I/O during the last HDF5 + operation performed with the provided DXPL. 
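A minimal usage sketch of the new property (hypothetical, not part of this patch; it assumes an application compiled against a library containing this change, with <stdio.h> available, and a DXPL that was just used for a raw data transfer):

    /* Report which kind(s) of raw data I/O the last transfer on dxpl_id used.
     * The helper name is illustrative only. */
    static herr_t
    report_actual_io_mode(hid_t dxpl_id)
    {
        uint32_t io_mode = 0;

        if (H5Pget_actual_selection_io_mode(dxpl_id, &io_mode) < 0)
            return -1;

        if (io_mode == 0)
            printf("no raw data I/O reached the file (e.g. data was cached)\n");
        if (io_mode & H5D_SCALAR_IO)
            printf("scalar (legacy) I/O was performed\n");
        if (io_mode & H5D_VECTOR_IO)
            printf("vector I/O was performed\n");
        if (io_mode & H5D_SELECTION_IO)
            printf("selection I/O was performed\n");

        return 0;
    }

Because the value is a bitmask, more than one flag can be set for a single operation (for example, a chunked write that hits both raw chunks and fill values).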
+ - Added support for in-place type conversion in most cases In-place type conversion allows the library to perform type conversion diff --git a/src/H5CX.c b/src/H5CX.c index b3b2fca41b9..c46c58af35e 100644 --- a/src/H5CX.c +++ b/src/H5CX.c @@ -299,6 +299,11 @@ typedef struct H5CX_t { bool no_selection_io_cause_set; /* Whether reason for not performing selection I/O is set */ bool no_selection_io_cause_valid; /* Whether reason for not performing selection I/O is valid */ + uint32_t + actual_selection_io_mode; /* Actual selection I/O mode used (H5D_ACTUAL_SELECTION_IO_MODE_NAME) */ + hbool_t actual_selection_io_mode_set; /* Whether actual selection I/O mode is set */ + hbool_t actual_selection_io_mode_valid; /* Whether actual selection I/O mode is valid */ + /* Cached LCPL properties */ H5T_cset_t encoding; /* Link name character encoding */ bool encoding_valid; /* Whether link name character encoding is valid */ @@ -380,6 +385,8 @@ typedef struct H5CX_dxpl_cache_t { H5D_selection_io_mode_t selection_io_mode; /* Selection I/O mode (H5D_XFER_SELECTION_IO_MODE_NAME) */ uint32_t no_selection_io_cause; /* Reasons for not performing selection I/O (H5D_XFER_NO_SELECTION_IO_CAUSE_NAME) */ + uint32_t actual_selection_io_mode; /* Actual selection I/O mode + (H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME) */ bool modify_write_buf; /* Whether the library can modify write buffers */ } H5CX_dxpl_cache_t; @@ -571,13 +578,18 @@ H5CX_init(void) /* Get the selection I/O mode */ if (H5P_get(dx_plist, H5D_XFER_SELECTION_IO_MODE_NAME, &H5CX_def_dxpl_cache.selection_io_mode) < 0) - HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve parallel transfer method"); + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve selection I/O mode"); /* Get the local & global reasons for breaking selection I/O values */ if (H5P_get(dx_plist, H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, &H5CX_def_dxpl_cache.no_selection_io_cause) < 0) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve cause for no selection I/O"); + /* Get the actual selection I/O mode */ + if (H5P_get(dx_plist, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, + &H5CX_def_dxpl_cache.actual_selection_io_mode) < 0) + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve actual selection I/O mode"); + /* Get the modify write buffer property */ if (H5P_get(dx_plist, H5D_XFER_MODIFY_WRITE_BUF_NAME, &H5CX_def_dxpl_cache.modify_write_buf) < 0) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve modify write buffer property"); @@ -2514,6 +2526,47 @@ H5CX_get_no_selection_io_cause(uint32_t *no_selection_io_cause) FUNC_LEAVE_NOAPI(ret_value) } /* end H5CX_get_no_selection_io_cause() */ +/*------------------------------------------------------------------------- + * Function: H5CX_get_actual_selection_io_mode + * + * Purpose: Retrieves the actual I/O mode (scalar, vector, and/or selection) for the current API call + *context. 
+ * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5CX_get_actual_selection_io_mode(uint32_t *actual_selection_io_mode) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + assert(actual_selection_io_mode); + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + assert(head && *head); + assert(H5P_DEFAULT != (*head)->ctx.dxpl_id); + + /* This property is a special case - we want to wipe out any previous setting. Copy the default setting + * if it has not been set yet. */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT && !(*head)->ctx.actual_selection_io_mode_set && + !(*head)->ctx.actual_selection_io_mode_valid) { + (*head)->ctx.actual_selection_io_mode = H5CX_def_dxpl_cache.actual_selection_io_mode; + (*head)->ctx.actual_selection_io_mode_set = true; + } + H5CX_RETRIEVE_PROP_VALID_SET(dxpl, H5P_DATASET_XFER_DEFAULT, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, + actual_selection_io_mode) + + /* Get the value */ + *actual_selection_io_mode = (*head)->ctx.actual_selection_io_mode; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5CX_get_actual_selection_io_mode() */ + /*------------------------------------------------------------------------- * Function: H5CX_get_modify_write_buf * @@ -3443,7 +3496,7 @@ H5CX_test_set_mpio_coll_rank0_bcast(bool mpio_coll_rank0_bcast) #endif /* H5_HAVE_PARALLEL */ /*------------------------------------------------------------------------- - * Function: H5CX_set_no_selecction_io_cause + * Function: H5CX_set_no_selection_io_cause * * Purpose: Sets the reason for not performing selection I/O for * the current API call context. @@ -3472,7 +3525,39 @@ H5CX_set_no_selection_io_cause(uint32_t no_selection_io_cause) } /* end if */ FUNC_LEAVE_NOAPI_VOID -} /* end H5CX_set_no_selectiion_io_cause() */ +} /* end H5CX_set_no_selection_io_cause() */ + +/*------------------------------------------------------------------------- + * Function: H5CX_set_actual_selection_io_mode + * + * Purpose: Sets the actual selection I/O mode for the current API + * call context. 
+ * + * Return: + * + *------------------------------------------------------------------------- + */ +void +H5CX_set_actual_selection_io_mode(uint32_t actual_selection_io_mode) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Sanity checks */ + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + assert(head && *head); + assert((*head)->ctx.dxpl_id != H5P_DEFAULT); + + /* If we're using the default DXPL, don't modify it */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT) { + /* Cache the value for later, marking it to set in DXPL when context popped */ + (*head)->ctx.actual_selection_io_mode = actual_selection_io_mode; + (*head)->ctx.actual_selection_io_mode_set = true; + } + + FUNC_LEAVE_NOAPI_VOID +} /* end H5CX_set_actual_selection_io_mode() */ /*------------------------------------------------------------------------- * Function: H5CX_get_ohdr_flags @@ -3529,7 +3614,17 @@ H5CX__pop_common(bool update_dxpl_props) /* Check for cached DXPL properties to return to application */ if (update_dxpl_props) { + /* actual_selection_io_mode is a special case - we always want to set it in the property list even if + * it was never set by the library, in that case it indicates no I/O was performed and we don't want + * to leave the (possibly incorrect) old value in the property list, so set from the default property + * list */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT && !(*head)->ctx.actual_selection_io_mode_set) { + (*head)->ctx.actual_selection_io_mode = H5CX_def_dxpl_cache.actual_selection_io_mode; + (*head)->ctx.actual_selection_io_mode_set = true; + } + H5CX_SET_PROP(H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, no_selection_io_cause) + H5CX_SET_PROP(H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, actual_selection_io_mode) #ifdef H5_HAVE_PARALLEL H5CX_SET_PROP(H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, mpio_actual_chunk_opt) H5CX_SET_PROP(H5D_MPIO_ACTUAL_IO_MODE_NAME, mpio_actual_io_mode) diff --git a/src/H5CXprivate.h b/src/H5CXprivate.h index aa6883b62ef..76812ee55ef 100644 --- a/src/H5CXprivate.h +++ b/src/H5CXprivate.h @@ -116,6 +116,7 @@ H5_DLL herr_t H5CX_get_vlen_alloc_info(H5T_vlen_alloc_info_t *vl_alloc_info); H5_DLL herr_t H5CX_get_dt_conv_cb(H5T_conv_cb_t *cb_struct); H5_DLL herr_t H5CX_get_selection_io_mode(H5D_selection_io_mode_t *selection_io_mode); H5_DLL herr_t H5CX_get_no_selection_io_cause(uint32_t *no_selection_io_cause); +H5_DLL herr_t H5CX_get_actual_selection_io_mode(uint32_t *actual_selection_io_mode); H5_DLL herr_t H5CX_get_modify_write_buf(bool *modify_write_buf); /* "Getter" routines for LCPL properties cached in API context */ @@ -162,6 +163,7 @@ H5_DLL herr_t H5CX_init(void); /* "Setter" routines for cached DXPL properties that must be returned to application */ H5_DLL void H5CX_set_no_selection_io_cause(uint32_t no_selection_io_cause); +H5_DLL void H5CX_set_actual_selection_io_mode(uint32_t actual_selection_io_mode); #ifdef H5_HAVE_PARALLEL H5_DLL void H5CX_set_mpio_actual_chunk_opt(H5D_mpio_actual_chunk_opt_mode_t chunk_opt); diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 118c6cd4224..fa8b0770359 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -78,15 +78,16 @@ #define H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME \ "local_no_collective_cause" /* cause of broken collective I/O in each process */ #define H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME \ - "global_no_collective_cause" /* cause of broken collective I/O in all processes */ -#define 
H5D_XFER_EDC_NAME "err_detect" /* EDC */ -#define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ -#define H5D_XFER_CONV_CB_NAME "type_conv_cb" /* Type conversion callback function */ -#define H5D_XFER_XFORM_NAME "data_transform" /* Data transform */ -#define H5D_XFER_DSET_IO_SEL_NAME "dset_io_selection" /* Dataset I/O selection */ -#define H5D_XFER_SELECTION_IO_MODE_NAME "selection_io_mode" /* Selection I/O mode */ -#define H5D_XFER_NO_SELECTION_IO_CAUSE_NAME "no_selection_io_cause" /* Cause for no selection I/O */ -#define H5D_XFER_MODIFY_WRITE_BUF_NAME "modify_write_buf" /* Modify write buffers */ + "global_no_collective_cause" /* cause of broken collective I/O in all processes */ +#define H5D_XFER_EDC_NAME "err_detect" /* EDC */ +#define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ +#define H5D_XFER_CONV_CB_NAME "type_conv_cb" /* Type conversion callback function */ +#define H5D_XFER_XFORM_NAME "data_transform" /* Data transform */ +#define H5D_XFER_DSET_IO_SEL_NAME "dset_io_selection" /* Dataset I/O selection */ +#define H5D_XFER_SELECTION_IO_MODE_NAME "selection_io_mode" /* Selection I/O mode */ +#define H5D_XFER_NO_SELECTION_IO_CAUSE_NAME "no_selection_io_cause" /* Cause for no selection I/O */ +#define H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME "actual_selection_io_mode" /* Actual selection I/O mode */ +#define H5D_XFER_MODIFY_WRITE_BUF_NAME "modify_write_buf" /* Modify write buffers */ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY /* Collective chunk instrumentation properties */ #define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard" diff --git a/src/H5FDint.c b/src/H5FDint.c index 082b6021332..5d3a80212ef 100644 --- a/src/H5FDint.c +++ b/src/H5FDint.c @@ -212,8 +212,9 @@ H5FD_locate_signature(H5FD_t *file, haddr_t *sig_addr) herr_t H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/) { - hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ - herr_t ret_value = SUCCEED; /* Return value */ + hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + uint32_t actual_selection_io_mode; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -257,6 +258,13 @@ H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /* if ((file->cls->read)(file, type, dxpl_id, addr + file->base_addr, size, buf) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed"); + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_read() */ @@ -273,9 +281,10 @@ H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /* herr_t H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf) { - hid_t dxpl_id; /* DXPL for operation */ - haddr_t eoa = HADDR_UNDEF; /* EOA for file */ - herr_t ret_value = SUCCEED; /* Return value */ + hid_t dxpl_id; /* DXPL for operation */ + haddr_t eoa = HADDR_UNDEF; /* EOA for file */ + uint32_t actual_selection_io_mode; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -308,6 +317,13 @@ H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void if ((file->cls->write)(file, type, dxpl_id, addr + file->base_addr, size, buf) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request 
failed"); + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_write() */ @@ -360,6 +376,7 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs size_t size = 0; H5FD_mem_t type = H5FD_MEM_DEFAULT; hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + hbool_t is_raw = FALSE; /* Does this include raw data */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -441,6 +458,10 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs else { type = types[i]; + + /* Check for raw data operation */ + if (type == H5FD_MEM_DRAW) + is_raw = TRUE; } } @@ -455,13 +476,27 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs (unsigned long long)eoa); } } + else + /* We must still check if this is a raw data read */ + for (i = 0; i < count && types[i] != H5FD_MEM_NOLIST; i++) + if (types[i] == H5FD_MEM_DRAW) { + is_raw = true; + break; + } /* if the underlying VFD supports vector read, make the call */ if (file->cls->read_vector) { - if ((file->cls->read_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0) - HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed"); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + uint32_t actual_selection_io_mode; + + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { @@ -471,6 +506,7 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs extend_sizes = false; extend_types = false; uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; for (i = 0; i < count; i++) { @@ -512,6 +548,13 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -575,6 +618,7 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr H5FD_mem_t type = H5FD_MEM_DEFAULT; hid_t dxpl_id; /* DXPL for operation */ haddr_t eoa = HADDR_UNDEF; /* EOA for file */ + hbool_t is_raw = FALSE; /* Does this include raw data */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -646,6 +690,10 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr else { type = types[i]; + + /* Check for raw data operation */ + if (type == H5FD_MEM_DRAW) + is_raw = true; } } @@ -663,10 +711,17 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr /* if the underlying VFD supports vector write, make the call */ if (file->cls->write_vector) { - if ((file->cls->write_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0) - HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed"); + + /* Set actual selection 
I/O mode, if this is a raw data operation */ + if (is_raw) { + uint32_t actual_selection_io_mode; + + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { /* otherwise, implement the vector write as a sequence of regular @@ -675,6 +730,7 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr extend_sizes = false; extend_types = false; uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; for (i = 0; i < count; i++) { @@ -716,6 +772,13 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -996,18 +1059,35 @@ H5FD__read_selection_translate(uint32_t skip_vector_cb, H5FD_t *file, H5FD_mem_t /* Issue vector read call if appropriate */ if (use_vector) { + uint32_t actual_selection_io_mode; + H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t); if ((file->cls->read_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW && count > 0) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } - else { + else if (count > 0) { uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -1161,6 +1241,8 @@ H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_s /* if the underlying VFD supports selection read, make the call */ if (file->cls->read_selection) { + uint32_t actual_selection_io_mode; + /* Allocate array of space IDs if necessary, otherwise use local * buffers */ if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) { @@ -1186,6 +1268,13 @@ H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_s if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else /* Otherwise, implement the selection read 
as a sequence of regular @@ -1337,9 +1426,18 @@ H5FD_read_selection_id(uint32_t skip_cb, H5FD_t *file, H5FD_mem_t type, uint32_t /* if the underlying VFD supports selection read, make the call */ if (!skip_selection_cb && file->cls->read_selection) { + uint32_t actual_selection_io_mode; + if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { /* Otherwise, implement the selection read as a sequence of regular @@ -1653,18 +1751,35 @@ H5FD__write_selection_translate(uint32_t skip_vector_cb, H5FD_t *file, H5FD_mem_ /* Issue vector write call if appropriate */ if (use_vector) { + uint32_t actual_selection_io_mode; + H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t); if ((file->cls->write_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW && count > 0) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } - else { + else if (count > 0) { uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -1810,6 +1925,8 @@ H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_ /* if the underlying VFD supports selection write, make the call */ if (file->cls->write_selection) { + uint32_t actual_selection_io_mode; + /* Allocate array of space IDs if necessary, otherwise use local * buffers */ if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) { @@ -1835,6 +1952,13 @@ H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_ if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else /* Otherwise, implement the selection write as a sequence of regular @@ -1979,9 +2103,18 @@ H5FD_write_selection_id(uint32_t skip_cb, H5FD_t *file, H5FD_mem_t type, uint32_ /* if the underlying VFD supports selection write, make the call */ if (!skip_selection_cb && file->cls->write_selection) { + uint32_t 
actual_selection_io_mode; + if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { /* Otherwise, implement the selection write as a sequence of regular diff --git a/src/H5M.c b/src/H5M.c index e2fd2025a9b..f59e02fa3ee 100644 --- a/src/H5M.c +++ b/src/H5M.c @@ -893,9 +893,6 @@ H5Mget_count(hid_t map_id, hsize_t *count /*out*/, hid_t dxpl_id) else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.get.get_type = H5VL_MAP_GET_COUNT; map_args.get.args.get_count.count = 0; @@ -952,9 +949,6 @@ H5M__put_api_common(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.put.key_mem_type_id = key_mem_type_id; map_args.put.key = key; @@ -1087,9 +1081,6 @@ H5M__get_api_common(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.get_val.key_mem_type_id = key_mem_type_id; map_args.get_val.key = key; @@ -1225,9 +1216,6 @@ H5Mexists(hid_t map_id, hid_t key_mem_type_id, const void *key, hbool_t *exists, else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.exists.key_mem_type_id = key_mem_type_id; map_args.exists.key = key; @@ -1305,9 +1293,6 @@ H5Miterate(hid_t map_id, hsize_t *idx, hid_t key_mem_type_id, H5M_iterate_t op, else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.specific.specific_type = H5VL_MAP_ITER; map_args.specific.args.iterate.loc_params.type = H5VL_OBJECT_BY_SELF; @@ -1394,9 +1379,6 @@ H5Miterate_by_name(hid_t loc_id, const char *map_name, hsize_t *idx, hid_t key_m else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.specific.specific_type = H5VL_MAP_ITER; map_args.specific.args.iterate.loc_params.type = H5VL_OBJECT_BY_NAME; @@ -1462,9 +1444,6 @@ H5Mdelete(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t dxpl_id) else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.specific.specific_type = H5VL_MAP_DELETE; map_args.specific.args.del.loc_params.type = 
H5VL_OBJECT_BY_SELF; diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index b6130f5117d..9adb2d60d3f 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -175,6 +175,9 @@ /* Definitions for cause of no selection I/O property */ #define H5D_XFER_NO_SELECTION_IO_CAUSE_SIZE sizeof(uint32_t) #define H5D_XFER_NO_SELECTION_IO_CAUSE_DEF 0 +/* Definitions for actual selection I/O mode property */ +#define H5D_XFER_ACTUAL_SELECTION_IO_MODE_SIZE sizeof(uint32_t) +#define H5D_XFER_ACTUAL_SELECTION_IO_MODE_DEF 0 /* Definitions for modify write buffer property */ #define H5D_XFER_MODIFY_WRITE_BUF_SIZE sizeof(bool) #define H5D_XFER_MODIFY_WRITE_BUF_DEF false @@ -295,7 +298,8 @@ static const H5S_t *H5D_def_dset_io_sel_g = H5D_XFER_DSET_IO_SEL_DEF; /* Default value for dataset I/O selection */ static const H5D_selection_io_mode_t H5D_def_selection_io_mode_g = H5D_XFER_SELECTION_IO_MODE_DEF; static const uint32_t H5D_def_no_selection_io_cause_g = H5D_XFER_NO_SELECTION_IO_CAUSE_DEF; -static const bool H5D_def_modify_write_buf_g = H5D_XFER_MODIFY_WRITE_BUF_DEF; +static const uint32_t H5D_def_actual_selection_io_mode_g = H5D_XFER_ACTUAL_SELECTION_IO_MODE_DEF; +static const bool H5D_def_modify_write_buf_g = H5D_XFER_MODIFY_WRITE_BUF_DEF; /*------------------------------------------------------------------------- * Function: H5P__dxfr_reg_prop @@ -470,6 +474,13 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass) NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class"); + /* Register the actual selection I/O mode property */ + /* (Note: this property should not have an encode/decode callback) */ + if (H5P__register_real(pclass, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, + H5D_XFER_ACTUAL_SELECTION_IO_MODE_SIZE, &H5D_def_actual_selection_io_mode_g, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class"); + /* Register the modify write buffer property */ if (H5P__register_real(pclass, H5D_XFER_MODIFY_WRITE_BUF_NAME, H5D_XFER_MODIFY_WRITE_BUF_SIZE, &H5D_def_modify_write_buf_g, NULL, NULL, NULL, H5D_XFER_MODIFY_WRITE_BUF_ENC, @@ -2456,6 +2467,39 @@ H5Pget_no_selection_io_cause(hid_t plist_id, uint32_t *no_selection_io_cause /*o FUNC_LEAVE_API(ret_value) } /* end H5Pget_no_selection_io_cause() */ +/*------------------------------------------------------------------------- + * Function: H5Pget_actual_selection_io_mode + * + * Purpose: Retrieves actual selection I/O mode + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * April 27, 2023 + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_actual_selection_io_mode(hid_t plist_id, uint32_t *actual_selection_io_mode /*out*/) +{ + H5P_genplist_t *plist; + herr_t ret_value = SUCCEED; /* return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "ix", plist_id, actual_selection_io_mode); + + /* Get the plist structure */ + if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for ID"); + + /* Return values */ + if (actual_selection_io_mode) + if (H5P_get(plist, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, actual_selection_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get actual_selection_io_mode value"); + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_actual_selection_io_mode() */ + /*------------------------------------------------------------------------- * Function: 
H5P__dxfr_modify_write_buf_enc * diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index a58d97ffd22..d822925bc0d 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -420,6 +420,13 @@ typedef enum H5D_selection_io_mode_t { } H5D_selection_io_mode_t; //! +/** + * Causes for H5Pget_actual_selection_io_mode() property + */ +#define H5D_SCALAR_IO (0x0001u) /**< Scalar (or legacy MPIO) I/O was performed */ +#define H5D_VECTOR_IO (0x0002u) /**< Vector I/O was performed */ +#define H5D_SELECTION_IO (0x0004u) /**< Selection I/O was performed */ + /********************/ /* Public Variables */ /********************/ @@ -5811,7 +5818,7 @@ H5_DLL int H5Pget_external_count(hid_t plist_id); * \note H5Pget_fill_time() is designed to work in coordination with the * dataset fill value and dataset storage allocation time properties, * retrieved with the functions H5Pget_fill_value() and - * H5Pget_alloc_time(). + * H5Pget_alloc_time().type == H5FD_MEM_DRAW * * \since 1.6.0 * @@ -8299,6 +8306,61 @@ H5_DLL herr_t H5Pget_selection_io(hid_t plist_id, H5D_selection_io_mode_t *selec */ H5_DLL herr_t H5Pget_no_selection_io_cause(hid_t plist_id, uint32_t *no_selection_io_cause); +/** + * \ingroup DXPL + * + * \brief Retrieves the type(s) of I/O that HDF5 actually performed on raw data + * during the last I/O call + * + * \dxpl_id{plist_id} + * \param[out] actual_selection_io_mode A bitwise set value indicating the + * type(s) of I/O performed + * \return \herr_t + * + * \par Motivation: + * A user can request selection I/O to be performed via a data transfer + * property list (DXPL). This can be used to enable collective I/O with + * type conversion, or with custom VFDs that support vector or selection + * I/O. However, there are conditions that can cause HDF5 to forgo + * selection or vector I/O and perform legacy (scalar) I/O instead. + * This function allows the user to determine which type or types of + * I/O were actually performed. + * + * \details H5Pget_actual_selection_io_mode() allows the user to determine which + * type(s) of I/O were actually performed on raw data during the last + * I/O operation which used \p plist_id. This property is set after + * all I/O is completed; if I/O fails, it will not be set. + * + * H5Pget_no_selection_io_cause() can be used to determine the reason + * why selection or vector I/O was not performed. + * + * Valid bitflags returned in \p actual_selection_io_mode are listed + * as follows. + * + * - #H5D_SCALAR_IO + * Scalar (or legacy MPIO) I/O was performed + * - #H5D_VECTOR_IO + * Vector I/O was performed + * - #H5D_SELECTION_IO + * Selection I/O was performed + * + * 0 or more of these can be present in \p actual_selection_io_mode in + * a bitwise fashion, since a single operation can trigger multiple + * instances of I/O, possibly with different types. A value of \p 0 + * indicates no raw data I/O was performed during the operation. + * + * Be aware that this function will only include raw data I/O performed + * to/from disk as part of the last I/O operation. Any metadata + * I/O, including attribute and compact dataset I/O, is disregarded. + * It is also possible that data was cached in the dataset chunk cache + * or sieve buffer, which may prevent I/O from hitting the disk, and + * thereby prevent it from being counted by this function. 
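As an illustrative aside (not part of this header), the new query can be paired with H5Pget_no_selection_io_cause() to diagnose why selection I/O was skipped; the fragment below assumes dxpl_id is the DXPL used for the preceding raw data transfer:

    uint32_t actual = 0, cause = 0;

    if (H5Pget_actual_selection_io_mode(dxpl_id, &actual) < 0)
        return -1;
    if (!(actual & H5D_SELECTION_IO)) {
        /* Selection I/O did not happen -- ask the library why */
        if (H5Pget_no_selection_io_cause(dxpl_id, &cause) < 0)
            return -1;
        if (cause & H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB)
            printf("VFD provides neither vector nor selection I/O callbacks\n");
    }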
+ * + * \since 1.14.3 + * + */ +H5_DLL herr_t H5Pget_actual_selection_io_mode(hid_t plist_id, uint32_t *actual_selection_io_mode); + /** * * \ingroup DXPL diff --git a/test/select_io_dset.c b/test/select_io_dset.c index 79449aac070..33b1c843d09 100644 --- a/test/select_io_dset.c +++ b/test/select_io_dset.c @@ -104,13 +104,28 @@ typedef enum { #define TEST_TCONV_BUF_TOO_SMALL 0x100 #define TEST_IN_PLACE_TCONV 0x200 +static herr_t +check_actual_selection_io_mode(hid_t dxpl, uint32_t sel_io_mode_expected) +{ + uint32_t actual_sel_io_mode; + + if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0) + TEST_ERROR; + if (actual_sel_io_mode != sel_io_mode_expected) + TEST_ERROR; + + return SUCCEED; +error: + return FAIL; +} + /* * Case 1: single dataset read/write, no type conversion (null case) * --create dataset with H5T_NATIVE_INT * --write/read dataset with H5T_NATIVE_INT */ static herr_t -test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_no_type_conv(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -130,14 +145,14 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -146,7 +161,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -156,23 +171,23 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -180,7 +195,11 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Write data to the dataset with/without data transform */ if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -188,7 +207,11 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Read data from the dataset without data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data or transformed data read */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -203,7 +226,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Read the data from the dataset with data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read is transformed a second time */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -216,15 +239,15 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -252,7 +275,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) * --read again with H5T_STD_I32BE */ static herr_t -test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_no_size_change_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -268,39 +291,39 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) int fillvalue = (-1); if ((wbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && (wbuf_bak = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) - FAIL_STACK_ERROR; + TEST_ERROR; if ((rbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillvalue) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -309,7 +332,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -325,7 +348,11 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Write the data to the dataset with little endian */ if (H5Dwrite(did, H5T_STD_I32LE, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -333,7 +360,11 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read the data from the dataset with little endian */ if (H5Dread(did, H5T_STD_I32LE, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data read little endian */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -347,7 +378,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read the data from the dataset with big endian */ if (H5Dread(did, H5T_STD_I32BE, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read in big endian */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -360,13 +391,13 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; free(wbuf); free(wbuf_bak); @@ -405,7 +436,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * */ static herr_t -test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_larger_mem_type_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -425,14 +456,14 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -441,7 +472,7 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -451,23 +482,23 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -475,7 +506,11 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Write data to the dataset with/without data transform set in dxpl */ if (H5Dwrite(did, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -483,7 +518,11 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Read the data from the dataset without data transform in dxpl */ if (H5Dread(did, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data or transformed data read */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -498,7 +537,7 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Read data from the dataset with data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read is transformed a second time */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -511,15 +550,15 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -547,7 +586,7 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign * --read dataset with H5T_NATIVE_SHORT */ static herr_t -test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_smaller_mem_type_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -567,14 +606,14 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -583,7 +622,7 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Create 1d chunked dataset with/without data transform */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -593,23 +632,23 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) { if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Copy wbuf if the library will be modifying it */ @@ -618,7 +657,11 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Write data to the dataset with/without data transform in dxpl */ if (H5Dwrite(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - 
FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -626,7 +669,11 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Read data from the dataset without data transform in dxpl */ if (H5Dread(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data or transformed data read */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -641,7 +688,7 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Read data from the dataset with data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read is transformed a second time */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -654,15 +701,15 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -730,55 +777,55 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Allocate buffers for datasets */ if (NULL == (s1_wbuf = (s1_t *)malloc(sizeof(s1_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s1_wbuf_bak = (s1_t *)malloc(sizeof(s1_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (s1_rbuf = (s1_t *)malloc(sizeof(s1_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (s2_wbuf = (s2_t *)malloc(sizeof(s2_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s2_wbuf_bak = (s2_t *)malloc(sizeof(s2_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (s2_rbuf = (s2_t *)malloc(sizeof(s2_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create the memory data type */ if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_fill_value(dcpl, s1_tid, &fillvalue) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Case 5(a) */ @@ -789,7 +836,7 @@ test_cmpd_with_bkg(hid_t 
fid, unsigned chunked, unsigned mwbuf) /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -805,7 +852,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Write all the data to the dataset */ if (H5Dwrite(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1_wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -813,7 +860,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read all the data from the dataset */ if (H5Dread(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -838,12 +885,12 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -851,7 +898,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Write s1_wbuf to the dataset with only subset members in ss_tid */ if (H5Dwrite(did, ss_ac_tid, H5S_ALL, H5S_ALL, dxpl, s1_wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -859,7 +906,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read the whole compound back */ if (H5Dread(did, ss_ac_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify the compound fields have the correct (old or new) values */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -884,16 +931,16 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read the dataset: will read only what is set in */ if (H5Dread(did, ss_bc_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -915,13 +962,13 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * --1 smaller mem type */ if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LONG) < 0 || H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Update s2_wbuf with unique values */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -936,7 +983,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) memcpy(s2_wbuf_bak, s2_wbuf, sizeof(s2_t) * DSET_SELECT_DIM); if (H5Dwrite(did, s2_tid, H5S_ALL, H5S_ALL, dxpl, s2_wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -959,21 
+1006,21 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s1_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s2_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(ss_ac_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(ss_bc_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Release buffers */ free(s1_wbuf); @@ -1030,7 +1077,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Datatype for all datasets: H5T_NATIVE_LONG */ static herr_t -test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_multi_dsets_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { size_t ndsets; int i, j; @@ -1074,41 +1121,41 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set up file space ids, mem space ids, and dataset ids */ for (i = 0; i < (int)ndsets; i++) { if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Generate dataset name */ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i, @@ -1118,31 +1165,31 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? 
H5T_NATIVE_LONG : H5T_NATIVE_INT), file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); /* Allocate buffers for all datasets */ if (NULL == (total_wbuf = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (total_wbuf_bak = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_trans_wbuf = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_rbuf = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); if (NULL == (total_lwbuf = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (total_lwbuf_bak = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_trans_lwbuf = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_lrbuf = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -1175,6 +1222,10 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) TEST_ERROR; + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; + /* Restore wbuf from backup if the library modified it */ if (mwbuf) memcpy(total_wbuf, total_wbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); @@ -1183,6 +1234,10 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) TEST_ERROR; + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; + /* Verify */ for (i = 0; i < (int)ndsets; i++) for (j = 0; j < DSET_SELECT_DIM; j++) @@ -1199,6 +1254,10 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) TEST_ERROR; + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; + /* Verify */ for (i = 0; i < (int)ndsets; i++) for (j = 0; j < DSET_SELECT_DIM; j++) @@ -1260,19 +1319,19 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m } if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(mem_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } free(total_wbuf); @@ -1315,7 +1374,7 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (total_lrbuf) free(total_lrbuf); if (total_trans_lwbuf) - free(total_lrbuf); + free(total_trans_lwbuf); return FAIL; @@ -1404,41 +1463,41 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Create the memory data type */ if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Generate dataset name */ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i, @@ -1447,7 +1506,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create ith dataset */ if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); @@ -1530,12 +1589,12 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Untouched memory and file spaces for other datasets */ for (i = 0; i < (int)ndsets; i++) { @@ -1603,18 +1662,18 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || 
H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Reset memory and file space for dataset */ if (H5Sselect_all(mem_sids[mm]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sselect_all(file_sids[mm]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Untouched memory and file space for other datasets */ for (i = 0; i < (int)ndsets; i++) { @@ -1677,13 +1736,13 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * --1 smaller mem type */ if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LONG) < 0 || H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { s2_wbufi[i] = s2_total_wbuf + (i * DSET_SELECT_DIM); @@ -1735,17 +1794,17 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(mem_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } free(total_wbuf); @@ -1845,34 +1904,34 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Set up file space ids, mem space ids, and dataset ids */ for (i = 0; i < (int)ndsets; i++) { if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Generate dataset name */ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i, @@ -1881,7 +1940,7 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create ith dataset */ if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Case a */ @@ -1891,11 +1950,11 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Allocate buffers for all datasets */ if (NULL == (total_wbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_wbuf_bak = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_rbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -1958,11 +2017,11 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Allocate buffers for all datasets */ if (NULL == (total_lwbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL 
== (total_lwbuf_bak = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_lrbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -2033,11 +2092,11 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Allocate buffers for all datasets */ if (NULL == (total_swbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_swbuf_bak = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_srbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -2088,17 +2147,17 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(mem_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } free(total_wbuf); @@ -2278,66 +2337,66 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Enable selection I/O */ if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set dataset layout: contiguous or chunked */ dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Create compound data type: s1_t */ if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create compound data type: s3_t */ if ((s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(s3_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s3_tid, "a", HOFFSET(s3_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s3_tid, "b", HOFFSET(s3_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s3_tid, "c", HOFFSET(s3_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s3_tid, "d", HOFFSET(s3_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create compound data type: s4_t */ if ((s4_tid = H5Tcreate(H5T_COMPOUND, sizeof(s4_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s4_tid, "b", HOFFSET(s4_t, b), H5T_NATIVE_UINT) < 0 || H5Tinsert(s4_tid, "d", HOFFSET(s4_t, d), H5T_NATIVE_UINT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset for i ndsets */ for (i = 0; i < (int)ndsets; i++) { /* File space ids */ if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Memory space ids */ if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; mm = HDrandom() % (int)ndsets; if (mm == 0) { @@ -2346,7 +2405,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) 
chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } else if (mm == 1) { dset_types[i] = DSET_WITH_CONV_AND_NO_BKG; @@ -2354,7 +2413,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } else { dset_types[i] = DSET_WITH_CONV_AND_BKG; @@ -2362,7 +2421,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } } /* end for i ndsets */ @@ -2372,49 +2431,49 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* DSET_WITH_NO_CONV */ buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); if (NULL == (total_wbuf1 = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (total_wbuf1_bak = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_rbuf1 = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* DSET_WITH_CONV_AND_NO_BKG */ buf_size = ndsets * DSET_SELECT_DIM * sizeof(unsigned long); if (NULL == (ul_total_wbuf2 = (unsigned long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (ul_total_wbuf2_bak = (unsigned long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); if (NULL == (l_total_rbuf2 = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); if (NULL == (l_total_wbuf2 = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (l_total_wbuf2_bak = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(short); if (NULL == (s_total_rbuf2 = (short *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* DSET_WITH_CONV_AND_BKG */ buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); if (NULL == (s1_total_wbuf3 = (s1_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s1_total_wbuf3_bak = (s1_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(s3_t); if (NULL == (s3_total_rbuf3 = (s3_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(s4_t); if (NULL == (s4_total_wbuf3 = (s4_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s4_total_wbuf3_bak = (s4_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); if (NULL == (s1_total_rbuf3 = (s1_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Test with s settings for ndsets */ for (s = SETTING_A; s <= SETTING_B; s++) { @@ -2622,26 +2681,26 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* Closing */ if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s1_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s3_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s4_tid) < 0) - FAIL_STACK_ERROR; + 
TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Don't delete the last set of datasets */ if ((n + 1) != niter) if (H5Ldelete(fid, dset_names[i], H5P_DEFAULT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Freeing */ @@ -2756,7 +2815,7 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) TESTING("H5Pget/set_selection_io_mode()"); if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) TEST_ERROR; @@ -2788,16 +2847,16 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((did = H5Dcreate2(fid, "test_chk_dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -2805,7 +2864,7 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) /* May change the selection io actually performed */ if (H5Dwrite(did, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) TEST_ERROR; @@ -2815,15 +2874,15 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -2882,34 +2941,31 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ } if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Enable page buffering to trigger H5D_PAGE_BUFFER */ if (test_mode & TEST_PAGE_BUFFER) { if (H5Pset_page_buffer_size(fapl, 4096, 0, 0) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } else { /* Not page buffer test, reset to default */ if (H5Pset_page_buffer_size(fapl, 0, 0, 0) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 0, (hsize_t)1) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; - /* If default mode, 1st write will trigger cb, 2nd write will trigger sieve */ - /* If on mode, will trigger nothing because the on mode path is different */ - /* Need 2 writes */ if (test_mode & TEST_CONTIGUOUS_SIEVE_BUFFER) { no_selection_io_cause_write_expected |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; no_selection_io_cause_read_expected |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; @@ -2917,14 +2973,14 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (test_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET) { if (H5Pset_layout(dcpl, H5D_COMPACT) < 0) - FAIL_STACK_ERROR; + 
TEST_ERROR; no_selection_io_cause_write_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; no_selection_io_cause_read_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; } if (test_mode == TEST_DATASET_FILTER) { if (H5Pset_deflate(dcpl, 9) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; is_chunked = true; no_selection_io_cause_write_expected |= H5D_SEL_IO_DATASET_FILTER; no_selection_io_cause_read_expected |= H5D_SEL_IO_DATASET_FILTER; @@ -2938,7 +2994,7 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (test_mode == TEST_DISABLE_BY_API) { if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_OFF) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; no_selection_io_cause_write_expected |= H5D_SEL_IO_DISABLE_BY_API; no_selection_io_cause_read_expected |= H5D_SEL_IO_DISABLE_BY_API; } @@ -2951,19 +3007,19 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ /* Datatype conversion */ if (test_mode & TEST_DATATYPE_CONVERSION) { if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; tid = H5T_NATIVE_UINT; /* If we're testing a too small tconv buffer, set the buffer to be too small */ if (test_mode & TEST_TCONV_BUF_TOO_SMALL) { if (H5Pset_buffer(dxpl, sizeof(int), NULL, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* If we're using in-place type conversion sel io will succeed and only switch to scalar at the * VFL */ if (test_mode & TEST_IN_PLACE_TCONV) { if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; no_selection_io_cause_write_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; } else @@ -2987,28 +3043,28 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (is_chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if ((did = H5Dcreate2(fid, "no_selection_io_cause", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) wbuf[i] = i; if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (test_mode & TEST_CONTIGUOUS_SIEVE_BUFFER) { if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_write) < 0) @@ -3023,11 +3079,11 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ test_mode & TEST_PAGE_BUFFER) { if (H5Dflush(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if (H5Dread(did, tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify causes of no selection I/O for write is as expected */ if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_read) < 0) @@ -3038,20 +3094,20 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(fcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; return SUCCEED; @@ -3085,13 +3141,13 @@ test_get_no_selection_io_cause(const char *filename, hid_t fapl) 
TESTING("H5Pget_no_selection_io_cause()"); if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* The following tests are based on H5D_SELECTION_IO_MODE_DEFAULT as the default setting in the library; skip the tests if that is not true */ @@ -3208,7 +3264,8 @@ main(void) case TEST_NO_TYPE_CONV: /* case 1 */ TESTING_2("No type conversion (null case)"); - nerrors += (test_no_type_conv(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + nerrors += + (test_no_type_conv(fid, set_cache, chunked, dtrans, mwbuf) < 0 ? 1 : 0); break; @@ -3219,7 +3276,9 @@ main(void) if (dtrans) SKIPPED(); else - nerrors += (test_no_size_change_no_bkg(fid, chunked, mwbuf) < 0 ? 1 : 0); + nerrors += + (test_no_size_change_no_bkg(fid, set_cache, chunked, mwbuf) < 0 ? 1 + : 0); break; @@ -3227,7 +3286,9 @@ main(void) TESTING_2("Larger memory type, no background buffer"); nerrors += - (test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + (test_larger_mem_type_no_bkg(fid, set_cache, chunked, dtrans, mwbuf) < 0 + ? 1 + : 0); break; @@ -3235,7 +3296,9 @@ main(void) TESTING_2("Smaller memory type, no background buffer"); nerrors += - (test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + (test_smaller_mem_type_no_bkg(fid, set_cache, chunked, dtrans, mwbuf) < 0 + ? 1 + : 0); break; @@ -3253,7 +3316,7 @@ main(void) case TEST_MULTI_CONV_NO_BKG: /* case 6 */ TESTING_2("multi-datasets: type conv + no bkg buffer"); - nerrors += test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); + nerrors += test_multi_dsets_no_bkg(fid, set_cache, chunked, dtrans, mwbuf); break; diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 7dfb8bc93b4..6c054085ed7 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -69,10 +69,11 @@ typedef enum num_chunks_written_t { typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); -static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); -static void verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, - num_chunks_written_t chunks_written); -static void verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id); +static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); +static void verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, + num_chunks_written_t chunks_written); +static void verify_chunk_opt_status(size_t num_dsets, test_mode_t test_mode, bool any_io, bool any_filters, + bool collective, bool unalloc_read, bool did_alloc, hid_t dxpl_id); static const char *test_mode_to_string(test_mode_t test_mode); static void create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t filespace_id, @@ -80,9 +81,11 @@ static void create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t ty static void open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test_mode_t test_mode, hid_t *dset_ids); static void write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, - hid_t *fspace_ids, hid_t dxpl_id, const void **bufs, test_mode_t test_mode); + hid_t *fspace_ids, hid_t dcpl_id, hid_t dxpl_id, const void **bufs, + test_mode_t test_mode, bool any_io, bool collective, bool overwrite); 
static void read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id, - hid_t dxpl_id, void **bufs, test_mode_t test_mode); + hid_t dcpl_id, hid_t dxpl_id, void **bufs, test_mode_t test_mode, bool any_io, + bool collective, bool all_uninit_read); static void select_hyperslab(size_t num_dsets, hid_t *dset_ids, hsize_t *start, hsize_t *stride, hsize_t *count, hsize_t *block, hid_t *fspace_ids); @@ -471,11 +474,15 @@ verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, * I/O was performed. */ static void -verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id) +verify_chunk_opt_status(size_t num_dsets, test_mode_t test_mode, bool any_io, bool any_filters, + bool collective, bool unalloc_read, bool did_alloc, hid_t dxpl_id) { H5D_mpio_actual_chunk_opt_mode_t chunk_opt_mode; H5D_selection_io_mode_t sel_io_mode; + uint32_t actual_sel_io_mode; + uint32_t actual_sel_io_mode_reduced; uint32_t no_sel_io_cause = 0; + int mpi_code; herr_t ret; if (H5P_DEFAULT != dxpl_id) { @@ -528,6 +535,95 @@ verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id) "verified I/O optimization was linked-chunk I/O"); } } + + /* Verify actual selection I/O mode */ + ret = H5Pget_actual_selection_io_mode(dxpl_id, &actual_sel_io_mode); + VRFY((ret >= 0), "H5Pget_actual_selection_io_mode succeeded"); + + /* Reduce results to process 0 (bitwise OR so we get all I/O types) */ + mpi_code = + MPI_Reduce(&actual_sel_io_mode, &actual_sel_io_mode_reduced, 1, MPI_UINT32_T, MPI_BOR, 0, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Reduce succeeded"); + + /* Verify selection I/O mode on rank 0 */ + if (mpi_rank == 0) { + /* No actual I/O performed, only reported I/O will be from allocation, even if "no" datasets were + * involved (num_dsets == 0 implies the call was expected to fail, but it fails after allocation). + * Also if the test mode is mixed filtered and unfiltered and the call did not fail, then there + * will always be an I/O callback made with raw data. This is because unfiltered datasets fall + * back to scalar I/O when mixed with filtered, and scalar I/O reports an I/O call was made even + * with a size of 0 bytes, while vector I/O does not report I/O was made if passed 0 vector + * elements (because no elements were raw data), which is what happens when performing I/O on a + * filtered dataset with no selection. Vector I/O does report an I/O call was made if passed a raw + * data element of size 0, so this is consistent. 
*/ + if (!any_io) { + if (did_alloc || (num_dsets > 0 && test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + else + VRFY(0 == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was 0 (no I/O)"); + } + /* No filters, library should have used selection I/O if enabled, scalar I/O otherwise */ + else if (!any_filters) { + assert(!unalloc_read && !did_alloc); + if (sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) + VRFY(H5D_SELECTION_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was selection I/O"); + else + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + } + /* Independent I/O, library should have done no I/O if reading from unallocated datasets, scalar + * I/O otherwise, since filtered I/O is only supported with scalar I/O in independent/serial */ + else if (!collective) { + if (unalloc_read) + VRFY(0 == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was 0 (no I/O)"); + else + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + } + else + switch (test_mode) { + case USE_SINGLE_DATASET: + case USE_MULTIPLE_DATASETS: + /* Collective case with only filtered datasets. If we performed allocation then there + * should be scalar I/O for allocation in addition to vector I/O for the actual data. + * If we're reading from an unallocated dataset then there should be no actual I/O. + * Otherwise there should only be vector I/O. */ + if (did_alloc) + VRFY((H5D_SCALAR_IO | H5D_VECTOR_IO) == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar and vector I/O"); + else if (unalloc_read) + VRFY(0 == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was 0 (no I/O)"); + else + VRFY(H5D_VECTOR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was vector I/O"); + break; + + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + /* Collective case with mixed filtered and unfiltered datasets. If we're reading from + * unallocated datasets then there should be scalar I/O from reading the unfiltered + * datasets, since they are always allocated in parallel. Otherwise there should be + * vector I/O from the filtered datasets and scalar I/O from the unfiltered datasets.
+ */ + if (unalloc_read) + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + else + VRFY((H5D_SCALAR_IO | H5D_VECTOR_IO) == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar and vector I/O"); + break; + + case TEST_MODE_SENTINEL: + default: + printf("Invalid test mode\n"); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } } } @@ -707,10 +803,12 @@ open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test */ static void write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t *fspace_ids, - hid_t dxpl_id, const void **bufs, test_mode_t test_mode) + hid_t dcpl_id, hid_t dxpl_id, const void **bufs, test_mode_t test_mode, bool any_io, + bool collective, bool overwrite) { - hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; - hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + H5D_alloc_time_t alloc_time = H5D_ALLOC_TIME_DEFAULT; for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { mem_type_ids[dset_idx] = type_id; @@ -738,7 +836,11 @@ write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id MPI_Abort(MPI_COMM_WORLD, -1); } - verify_chunk_opt_status(num_dsets, dxpl_id); + if (!overwrite) + VRFY(H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0, "H5Pget_alloc_time succeeded"); + + verify_chunk_opt_status(num_dsets, test_mode, any_io, true, collective, false, + !overwrite && (alloc_time == H5D_ALLOC_TIME_LATE), dxpl_id); } /* @@ -747,11 +849,13 @@ write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id */ static void read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id, - hid_t dxpl_id, void **bufs, test_mode_t test_mode) + hid_t dcpl_id, hid_t dxpl_id, void **bufs, test_mode_t test_mode, bool any_io, bool collective, + bool all_uninit_read) { - hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; - hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_space_ids[MAX_NUM_DSETS_MULTI]; + H5D_alloc_time_t alloc_time = H5D_ALLOC_TIME_DEFAULT; for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { mem_type_ids[dset_idx] = type_id; @@ -780,7 +884,13 @@ read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, MPI_Abort(MPI_COMM_WORLD, -1); } - verify_chunk_opt_status(num_dsets, dxpl_id); + if (all_uninit_read) + VRFY(H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0, "H5Pget_alloc_time succeeded"); + + verify_chunk_opt_status(num_dsets, test_mode, any_io, true, collective, + all_uninit_read && + (alloc_time == H5D_ALLOC_TIME_INCR || alloc_time == H5D_ALLOC_TIME_LATE), + false, dxpl_id); } static void @@ -954,8 +1064,8 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -987,7 +1097,8 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil (C_DATATYPE)dset_idx; } - read_datasets(num_dsets, dset_ids, 
HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1108,8 +1219,8 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -1138,7 +1249,8 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1265,8 +1377,8 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -1303,7 +1415,8 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil } } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1424,8 +1537,8 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -1456,7 +1569,8 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1593,8 +1707,8 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group select_hyperslab(num_dsets, dset_ids, start, stride, count, block, 
fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -1608,8 +1722,8 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -1759,8 +1873,8 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -1774,8 +1888,8 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -1931,8 +2045,8 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -1946,8 +2060,8 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -2106,8 +2220,8 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, 
HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -2121,8 +2235,8 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -2175,28 +2289,31 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; - const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; - void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ - void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; - hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t data_size, correct_buf_size; - size_t num_dsets; - hid_t dset_ids[MAX_NUM_DSETS_MULTI]; - hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with a single process having no selection"); + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -2276,8 +2393,8 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi } } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, 
H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, mpi_size > 1, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2317,7 +2434,8 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi } } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, mpi_size == 1 && alloc_time == H5D_ALLOC_TIME_INCR); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2355,23 +2473,26 @@ static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; - const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; - void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ - void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; - hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t data_size, correct_buf_size; - size_t num_dsets; - hid_t dset_ids[MAX_NUM_DSETS_MULTI]; - hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with all processes having no selection"); + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -2428,8 +2549,8 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, false, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2453,7 +2574,8 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, 
dxpl_id, read_bufs, + test_mode, true, true, alloc_time == H5D_ALLOC_TIME_INCR); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2570,8 +2692,8 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2603,7 +2725,8 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2729,8 +2852,8 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2771,7 +2894,8 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2908,8 +3032,8 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z /* Set data transform expression */ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded"); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, plist_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, plist_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2943,7 +3067,8 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3072,8 +3197,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, 
H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3102,7 +3227,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou (C_DATATYPE)((j % (hsize_t)mpi_size) + (j / (hsize_t)mpi_size) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3232,8 +3358,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3262,7 +3388,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3389,8 +3516,8 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3436,7 +3563,8 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3580,7 +3708,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, data_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3613,7 +3742,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group } } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + 
read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3758,7 +3888,8 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, data_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3794,7 +3925,8 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, } } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3853,6 +3985,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro hid_t group_id = H5I_INVALID_HID; hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype " @@ -3887,6 +4020,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* Retrieve allocation time */ + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -3971,7 +4107,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro * of the H5Dwrite loop: */ /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, - dxpl_id, data_bufs, test_mode); */ + dcpl_id, dxpl_id, data_bufs, test_mode, true, true, false); */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { herr_t expected = FAIL; herr_t ret; @@ -4014,9 +4150,10 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro VRFY((ret == expected), "Dataset write"); if (expected == SUCCEED) - verify_chunk_opt_status(1, dxpl_id); + verify_chunk_opt_status(1, test_mode, true, false, true, false, false, dxpl_id); else - verify_chunk_opt_status(0, dxpl_id); + verify_chunk_opt_status(0, test_mode, false, true, true, false, alloc_time == H5D_ALLOC_TIME_LATE, + dxpl_id); } for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -4042,7 +4179,11 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + /* If some writes succeeded (due to mixed filtered mode) or if allocation time is late, then there is data + * on disk to be read */ + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, false, + !(test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED || alloc_time == H5D_ALLOC_TIME_LATE)); for (size_t dset_idx = 0; dset_idx < num_dsets; 
dset_idx++) { hid_t dset_dcpl; @@ -4122,6 +4263,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group hid_t group_id = H5I_INVALID_HID; hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion"); @@ -4155,6 +4297,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* Retrieve allocation time */ + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -4239,7 +4384,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group * of the H5Dwrite loop: */ /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, - dxpl_id, data_bufs, test_mode); */ + dcpl_id, dxpl_id, data_bufs, test_mode, true, true, false); */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { herr_t expected = FAIL; herr_t ret; @@ -4282,9 +4427,10 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group VRFY((ret == expected), "Dataset write"); if (expected == SUCCEED) - verify_chunk_opt_status(1, dxpl_id); + verify_chunk_opt_status(1, test_mode, true, false, true, false, false, dxpl_id); else - verify_chunk_opt_status(0, dxpl_id); + verify_chunk_opt_status(0, test_mode, false, true, true, false, alloc_time == H5D_ALLOC_TIME_LATE, + dxpl_id); } for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -4310,7 +4456,11 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + /* If some writes succeeded (due to mixed filtered mode) or if allocation time is late, then there is data + * on disk to be read */ + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, false, + !(test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED || alloc_time == H5D_ALLOC_TIME_LATE)); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { hid_t dset_dcpl; @@ -4475,8 +4625,8 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -4531,8 +4681,8 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -4695,8 +4845,8 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, 
H5Z_filter_t fil select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -4751,8 +4901,8 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -4917,8 +5067,8 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -4973,8 +5123,8 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -5163,8 +5313,8 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5229,8 +5379,8 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil } } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, mpi_size > 1 ? 
true : false, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -5394,8 +5544,8 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5438,8 +5588,8 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(data_bufs_nc[dset_idx], 0, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, false, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -5582,8 +5732,8 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5639,8 +5789,8 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -5837,8 +5987,8 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5895,8 +6045,8 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6078,8 +6228,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, 
data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -6142,8 +6292,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6335,8 +6485,8 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); @@ -6402,8 +6552,8 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6571,8 +6721,8 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -6634,8 +6784,8 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6818,8 +6968,8 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -6879,8 +7029,8 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, 
H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7088,7 +7238,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7144,7 +7295,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7338,7 +7490,8 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7394,7 +7547,8 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7591,7 +7745,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7647,7 +7802,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, false, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7850,7 +8006,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + 
write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7906,7 +8063,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, false, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -8059,8 +8217,8 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -8104,7 +8262,8 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id open_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -8230,8 +8389,8 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -8281,8 +8440,8 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, H5P_DEFAULT, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -8425,8 +8584,8 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, } } - write_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -8440,8 +8599,8 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, } } - read_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - 
test_mode); + read_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8570,8 +8729,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi read_bufs[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, @@ -8584,8 +8743,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi /* Verify the correct data was written */ open_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8635,8 +8794,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, @@ -8652,8 +8811,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8782,8 +8941,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t read_bufs[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -8794,8 +8953,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t /* Verify the correct data was written */ open_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, 
test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8846,8 +9005,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -8861,8 +9020,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8973,7 +9132,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id } /* Read entire dataset and verify that the fill value is returned */ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) @@ -9015,8 +9175,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9027,7 +9187,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); /* * Each MPI rank communicates their written piece of data @@ -9075,8 +9236,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, true); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9087,7 +9248,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t 
filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { C_DATATYPE *tmp_buf = read_bufs[dset_idx]; @@ -9120,7 +9282,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Read entire dataset and verify that the fill value is returned */ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) @@ -9155,8 +9318,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9167,7 +9330,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)(count[1] * block[1]); @@ -9209,8 +9373,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, true); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9221,7 +9385,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { C_DATATYPE *tmp_buf = read_bufs[dset_idx]; @@ -9360,8 +9525,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ * allocation in parallel, so the read should succeed in that case. 
*/ if (alloc_time == H5D_ALLOC_TIME_EARLY) { - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); } else { for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { @@ -9399,9 +9564,11 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ VRFY((ret == expected), "Dataset write"); if (expected == SUCCEED) - verify_chunk_opt_status(1, dxpl_id); + verify_chunk_opt_status(1, test_mode, true, false, true, false, false, dxpl_id); else - verify_chunk_opt_status(0, dxpl_id); + verify_chunk_opt_status( + 0, test_mode, false, true, true, + alloc_time == H5D_ALLOC_TIME_INCR || alloc_time == H5D_ALLOC_TIME_LATE, false, dxpl_id); } } @@ -9435,8 +9602,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9446,7 +9613,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); @@ -9470,8 +9638,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, true); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9482,7 +9650,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ /* Verify correct data was written */ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { free(data_bufs_nc[dset_idx]); @@ -9634,7 +9803,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap * yet, because there's no guarantee as to what may have been * read from the dataset. 
*/ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); /* * Write to part of the first chunk in the dataset with @@ -9669,8 +9839,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9681,7 +9851,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap /* Verify correct data was written */ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); @@ -9705,8 +9876,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9717,7 +9888,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap /* Verify correct data was written */ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { C_DATATYPE *tmp_buf = read_bufs[dset_idx]; diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c index 2e6839efcec..2be2b407236 100644 --- a/testpar/t_select_io_dset.c +++ b/testpar/t_select_io_dset.c @@ -159,7 +159,7 @@ set_dxpl(hid_t dxpl, H5D_selection_io_mode_t select_io_mode, H5FD_mpio_xfer_t mp } /* set_dxpl() */ /* - * Helper routine to check actual I/O mode on a dxpl + * Helper routine to check actual parallel I/O mode on a dxpl */ static void check_io_mode(hid_t dxpl, unsigned chunked) @@ -186,29 +186,65 @@ check_io_mode(hid_t dxpl, unsigned chunked) } /* check_io_mode() */ +static void +testing_check_io_mode(hid_t dxpl, H5D_mpio_actual_io_mode_t exp_io_mode) +{ + H5D_mpio_actual_io_mode_t actual_io_mode; + + if (H5Pget_mpio_actual_io_mode(dxpl, &actual_io_mode) < 0) + P_TEST_ERROR; + + if (actual_io_mode != exp_io_mode) { + nerrors++; + if (MAINPROCESS) + printf("\n Failed: Incorrect I/O mode (expected/actual) %u:%u", (unsigned)exp_io_mode, + (unsigned)actual_io_mode); + } + +} /* 
testing_check_io_mode() */ + +/* + * Helper routine to check actual selection I/O mode on a dxpl + */ +static void +check_actual_selection_io_mode(hid_t dxpl, uint32_t sel_io_mode_expected) +{ + uint32_t actual_sel_io_mode; + + if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0) + P_TEST_ERROR; + if (actual_sel_io_mode != sel_io_mode_expected) { + if (MAINPROCESS) + printf("\n Failed: Incorrect selection I/O mode (expected/actual) %u:%u", + (unsigned)sel_io_mode_expected, (unsigned)actual_sel_io_mode); + P_TEST_ERROR; + } +} + /* * Case 1: single dataset read/write, no type conversion (null case) */ static void -test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { - int i; - hid_t did = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl = H5I_INVALID_HID; - hid_t ntrans_dxpl = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - hsize_t dims[1]; - hsize_t cdims[1]; - hsize_t start[1], stride[1], count[1], block[1]; - int wbuf[DSET_SELECT_DIM]; - int wbuf_bak[DSET_SELECT_DIM]; - int trans_wbuf[DSET_SELECT_DIM]; - int rbuf[DSET_SELECT_DIM]; - char dset_name[DSET_NAME_LEN]; - const char *expr = "2*x"; + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + int wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + int trans_wbuf[DSET_SELECT_DIM]; + int rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2*x"; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; curr_nerrors = nerrors; @@ -224,11 +260,13 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) P_TEST_ERROR; + if (!dtrans && H5Pset_deflate(dcpl, 2) < 0) + P_TEST_ERROR; } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -262,7 +300,8 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -284,7 +323,14 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) if (mwbuf) memcpy(wbuf, wbuf_bak, sizeof(wbuf)); - check_io_mode(dxpl, chunked); + if (!dtrans || select) + exp_io_mode = chunked ? 
H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + testing_check_io_mode(dxpl, exp_io_mode); + + if (chunked && !dtrans) + check_actual_selection_io_mode(dxpl, H5D_VECTOR_IO); + else + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread(did, H5T_NATIVE_INT, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) @@ -327,6 +373,8 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) P_TEST_ERROR; if (H5Pclose(dxpl) < 0) P_TEST_ERROR; + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) P_TEST_ERROR; @@ -339,7 +387,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) * Case 2: single dataset read/write, no size change, no background buffer */ static void -test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -356,6 +404,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) char *rbuf = NULL; char dset_name[DSET_NAME_LEN]; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; + curr_nerrors = nerrors; if ((wbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) @@ -379,8 +429,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s", chunked ? "chunked" : "contig", - mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s_%s", chunked ? "chunked" : "contig", + select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -416,7 +466,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -430,7 +481,11 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) if (mwbuf) memcpy(wbuf, wbuf_bak, (size_t)(4 * DSET_SELECT_DIM)); - check_io_mode(dxpl, chunked); + if (select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + testing_check_io_mode(dxpl, exp_io_mode); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read the data from the dataset with little endian */ if (H5Dread(did, H5T_STD_I32LE, mspace_id, fspace_id, dxpl, rbuf) < 0) @@ -489,25 +544,26 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Case 3: single dataset read/write, larger mem type, no background buffer */ static void -test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { - int i; - hid_t did = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl = H5I_INVALID_HID; - hid_t ntrans_dxpl = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - hsize_t dims[1]; - hsize_t cdims[1]; - hsize_t start[1], stride[1], count[1], block[1]; - long wbuf[DSET_SELECT_DIM]; - long wbuf_bak[DSET_SELECT_DIM]; - long trans_wbuf[DSET_SELECT_DIM]; - long long rbuf[DSET_SELECT_DIM]; - char dset_name[DSET_NAME_LEN]; - const char *expr = "100 - x"; + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + long wbuf[DSET_SELECT_DIM]; + long wbuf_bak[DSET_SELECT_DIM]; + long trans_wbuf[DSET_SELECT_DIM]; + long long rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "100 - x"; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; curr_nerrors = nerrors; @@ -525,8 +581,8 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d chunked dataset with/without data transform */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -560,7 +616,8 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -582,7 +639,11 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign if (mwbuf) memcpy(wbuf, wbuf_bak, sizeof(wbuf)); - check_io_mode(dxpl, chunked); + if (select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + testing_check_io_mode(dxpl, exp_io_mode); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread(did, H5T_NATIVE_LLONG, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) @@ -637,25 +698,26 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign * Case 4: single dataset reader/write, smaller mem type, no background buffer */ static void -test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { - int i; - hid_t did = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl = H5I_INVALID_HID; - hid_t ntrans_dxpl = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - hsize_t dims[1]; - hsize_t cdims[1]; - hsize_t start[1], stride[1], count[1], block[1]; - short wbuf[DSET_SELECT_DIM]; - int wbuf_bak[DSET_SELECT_DIM]; - short trans_wbuf[DSET_SELECT_DIM]; - short rbuf[DSET_SELECT_DIM]; - char dset_name[DSET_NAME_LEN]; - const char *expr = "2 * (10 + x)"; + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + short wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + short trans_wbuf[DSET_SELECT_DIM]; + short rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2 * (10 + x)"; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; curr_nerrors = nerrors; @@ -673,8 +735,8 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d chunked dataset with/without data transform */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -708,7 +770,8 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -731,7 +794,11 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig if (mwbuf) memcpy(wbuf, wbuf_bak, sizeof(wbuf)); - check_io_mode(dxpl, chunked); + if (select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + testing_check_io_mode(dxpl, exp_io_mode); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) @@ -804,7 +871,7 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig * Verify the values read */ static void -test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -870,8 +937,8 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Case 5(a) */ /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s", chunked ? "chunked" : "contig", - mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s_%s", chunked ? "chunked" : "contig", + select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -907,7 +974,8 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -917,12 +985,12 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) if (H5Dwrite(did, s1_tid, mspace_id, fspace_id, dxpl, s1_wbuf) < 0) P_TEST_ERROR; + check_io_mode(dxpl, chunked); + /* Restore wbuf from backup if the library modified it */ if (mwbuf) memcpy(s1_wbuf, s1_wbuf_bak, sizeof(s1_t) * DSET_SELECT_DIM); - check_io_mode(dxpl, chunked); - /* Read all the data from the dataset */ memset(s1_rbuf, 0, sizeof(s1_t) * DSET_SELECT_DIM); if (H5Dread(did, s1_tid, mspace_id, fspace_id, dxpl, s1_rbuf) < 0) @@ -1094,6 +1162,10 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; if (H5Tclose(ss_bc_tid) < 0) P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; if (H5Dclose(did) < 0) P_TEST_ERROR; @@ -1115,7 +1187,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Case 6: Type conversions + some processes have null/empty selections in datasets */ static void -test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -1158,8 +1230,8 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? 
"mwbuf" : "nomwbuf"); /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -1170,7 +1242,8 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -1210,7 +1283,7 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned /* Create a memory dataspace */ if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) P_TEST_ERROR; - if (mpi_rank) { + if (!MAINPROCESS) { if (H5Sselect_none(mspace_id) < 0) P_TEST_ERROR; } @@ -1227,7 +1300,13 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned if (mwbuf) memcpy(lwbuf, lwbuf_bak, sizeof(lwbuf)); - check_io_mode(dxpl, chunked); + /* If not using selection I/O there will be no collective I/O, since type conversion is unsupported by + * legacy collective I/O */ + testing_check_io_mode( + dxpl, select ? (chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE) : 0); + + /* If not using selection I/O then the main process will do scalar I/O and others will do none */ + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : (MAINPROCESS ? H5D_SCALAR_IO : 0)); /* Read the data from the dataset: type conversion int-->long */ /* If dtrans, without data transform set in dxpl */ @@ -1395,7 +1474,7 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned * Datatype for all datasets: H5T_NATIVE_LONG */ static void -test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j; @@ -1410,6 +1489,8 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m hid_t mem_sids[MULTI_NUM_DSETS]; hid_t mem_tids[MULTI_NUM_DSETS]; + bool any_tconv = false; + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; hid_t dset_dids[MULTI_NUM_DSETS]; @@ -1457,7 +1538,8 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -1469,17 +1551,24 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m /* Set up file space ids and dataset ids */ for (i = 0; i < (int)ndsets; i++) { + bool tconv; + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i, - chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", select ? "select" : "noselect", + mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Flip a coin to see if we're doing type conversion */ + tconv = HDrandom() % 2; + if (tconv) + any_tconv = true; /* Create ith dataset */ - if ((dset_dids[i] = - H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? H5T_NATIVE_LONG : H5T_NATIVE_INT), - file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], (tconv ? H5T_NATIVE_LONG : H5T_NATIVE_INT), + file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; } @@ -1555,7 +1644,12 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (mwbuf) memcpy(total_wbuf, total_wbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); - check_io_mode(dxpl, chunked); + /* If doing type conversion or transform and not using selection I/O there will be no collective I/O, + * since type conversion is unsupported by legacy collective I/O */ + testing_check_io_mode(dxpl, ((any_tconv || dtrans) && !select) + ? 0 + : (chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE)); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) @@ -1708,7 +1802,7 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m * --Verify values read */ static void -test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j, mm; @@ -1769,7 +1863,8 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Each process takes x number of elements */ block[0] = dims[0] / (hsize_t)mpi_size; @@ -1794,8 +1889,8 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", mwbuf ? "mwbuf" : "nomwbuf"); /* Create ith dataset */ if ((dset_dids[i] = @@ -1860,6 +1955,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) memcpy(total_wbuf, total_wbuf_bak, buf_size); check_io_mode(dxpl, chunked); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) P_TEST_ERROR; @@ -2164,7 +2260,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Datatype for all datasets: H5T_STD_I16BE */ static void -test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j; @@ -2222,7 +2318,8 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Set up file space ids, mem space ids, and dataset ids */ for (i = 0; i < (int)ndsets; i++) { @@ -2230,8 +2327,8 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", mwbuf ? "mwbuf" : "nomwbuf"); /* Create ith dataset */ if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl, @@ -2301,6 +2398,7 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) memcpy(total_wbuf, total_wbuf_bak, buf_size); check_io_mode(dxpl, chunked); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset */ if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) @@ -2510,7 +2608,7 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * --this will trigger type conversion for (a), (b) & (c) */ static void -test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j; @@ -2568,7 +2666,8 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -2584,8 +2683,9 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s", i, - chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", select ? "select" : "noselect", + mwbuf ? 
"mwbuf" : "nomwbuf"); if (i == 0) { if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, @@ -2769,7 +2869,11 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un if (mwbuf) memcpy(total_wbuf, total_wbuf_bak, buf_size); - check_io_mode(dxpl, chunked); + /* If not using selection I/O there will be no collective I/O, since type conversion is unsupported by + * legacy collective I/O */ + testing_check_io_mode( + dxpl, select ? (chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE) : 0); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -2903,7 +3007,7 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un * --fields 'b' and 'd' are (DSET_SELECT_DIM + j + start[0]) */ static void -test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) +test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j, mm; @@ -2920,6 +3024,8 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) hid_t mem_tids[MULTI_NUM_DSETS]; hid_t r_mem_tids[MULTI_NUM_DSETS]; + bool any_tconv; + multi_dset_type_t dset_types[MULTI_NUM_DSETS]; hid_t s1_tid = H5I_INVALID_HID; @@ -2978,7 +3084,8 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Set dataset layout: contiguous or chunked */ dims[0] = DSET_SELECT_DIM; @@ -3039,24 +3146,27 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) mm = HDrandom() % (int)ndsets; if (mm == 0) { dset_types[i] = DSET_WITH_NO_CONV; - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", + mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; } else if (mm == 1) { dset_types[i] = DSET_WITH_CONV_AND_NO_BKG; - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", + mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; } else { dset_types[i] = DSET_WITH_CONV_AND_BKG; - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", + mwbuf ? 
"mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; @@ -3119,6 +3229,8 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* Test with s settings for ndsets */ for (s = SETTING_A; s <= SETTING_B; s++) { + any_tconv = false; + /* for i ndsets */ for (i = 0; i < (int)ndsets; i++) { @@ -3171,6 +3283,9 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) mem_tids[i] = H5T_NATIVE_LONG; r_mem_tids[i] = H5T_NATIVE_SHORT; + + /* There is type conversion in the read op */ + any_tconv = true; } break; @@ -3194,6 +3309,9 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) } mem_tids[i] = s1_tid; r_mem_tids[i] = s3_tid; + + /* There is type conversion in the read op */ + any_tconv = true; } else if (s == SETTING_B) { /* Initialize buffer indices */ @@ -3246,7 +3364,12 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) if (H5Dread_multi(ndsets, dset_dids, r_mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) P_TEST_ERROR; - check_io_mode(dxpl, chunked); + /* If doing type conversion and not using selection I/O there will be no collective I/O, since + * type conversion is unsupported by legacy collective I/O */ + testing_check_io_mode(dxpl, (any_tconv && !select) ? 0 + : (chunked ? H5D_MPIO_CHUNK_COLLECTIVE + : H5D_MPIO_CONTIGUOUS_COLLECTIVE)); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Verify result read */ /* for i ndsets */ @@ -3422,6 +3545,8 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) P_TEST_ERROR; + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, false); + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) P_TEST_ERROR; @@ -3442,20 +3567,12 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ /* Datatype conversion */ if (test_mode & TEST_DATATYPE_CONVERSION) { - /* With one exception, all will land at H5FD__mpio_read/write_selection(). - * As the xfer mode is H5FD_MPIO_INDEPENDENT, this will call - * H5FD__read/write_from_selection() triggering H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB. 
- */ - no_selection_io_cause_read_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + /* With one exception, all will land at H5FD__mpio_read/write_selection() */ - /* Exception case: This will turn off selection I/O landing at H5FD__mpio_write() */ - if ((test_mode & TEST_TCONV_BUF_TOO_SMALL) && !(test_mode & TEST_IN_PLACE_TCONV)) - no_selection_io_cause_write_expected |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; - else - no_selection_io_cause_write_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + if (test_mode & TEST_IN_PLACE_TCONV) + if (H5Pset_modify_write_buf(dxpl, true) < 0) + P_TEST_ERROR; - if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - P_TEST_ERROR; tid = H5T_NATIVE_UINT; /* If we're testing a too small tconv buffer, set the buffer to be too small */ @@ -3463,11 +3580,13 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (H5Pset_buffer(dxpl, sizeof(int), NULL, NULL) < 0) P_TEST_ERROR; - if (test_mode & TEST_IN_PLACE_TCONV) { - if (H5Pset_modify_write_buf(dxpl, true) < 0) - P_TEST_ERROR; - } - /* In-place type conversion for read doesn't require modify_write_buf */ + /* Exception case: When the type conversion buffer is too small and we're not allowing the library + * to modify the write buffer, the library will fall back to scalar independent I/O since the + * selection I/O path with type conversion requires a full size conversion buffer */ + if (!(test_mode & TEST_IN_PLACE_TCONV)) + /* In-place type conversion for read doesn't require modify_write_buf, so the read will still + * use selection I/O */ + no_selection_io_cause_write_expected |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; } } @@ -3493,6 +3612,10 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) P_TEST_ERROR; + if (!(test_mode & TEST_DISABLE_BY_API || test_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET || + ((test_mode & TEST_TCONV_BUF_TOO_SMALL) && !(test_mode & TEST_IN_PLACE_TCONV)))) + check_actual_selection_io_mode(dxpl, H5D_SELECTION_IO); + if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_write) < 0) P_TEST_ERROR; @@ -3535,9 +3658,6 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ static void test_get_no_selection_io_cause(const char *filename, hid_t fapl) { - hid_t dxpl = H5I_INVALID_HID; - H5D_selection_io_mode_t selection_io_mode; - if (MAINPROCESS) { printf("\n"); TESTING("for H5Pget_no_selection_io_cause()"); @@ -3545,21 +3665,6 @@ test_get_no_selection_io_cause(const char *filename, hid_t fapl) curr_nerrors = nerrors; - if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - P_TEST_ERROR; - if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) - P_TEST_ERROR; - if (H5Pclose(dxpl) < 0) - P_TEST_ERROR; - - /* The following tests are based on H5D_SELECTION_IO_MODE_DEFAULT as the - * default setting in the library; skip the tests if that is not true */ - if (selection_io_mode != H5D_SELECTION_IO_MODE_DEFAULT) { - if (MAINPROCESS) - SKIPPED(); - return; - } - test_no_selection_io_cause_mode(filename, fapl, TEST_DISABLE_BY_API); test_no_selection_io_cause_mode(filename, fapl, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET); test_no_selection_io_cause_mode(filename, fapl, TEST_DATATYPE_CONVERSION); @@ -3952,6 +4057,7 @@ main(int argc, char *argv[]) int test_select_config; unsigned chunked; unsigned dtrans; + unsigned select; unsigned mwbuf; h5_reset(); @@ -3978,163 +4084,170 @@ main(int argc, char *argv[]) /* therefore, not all tests are run with data 
transform */ for (dtrans = false; dtrans <= true; dtrans++) { - /* Test with and without modify_write_buf turned on */ - for (mwbuf = false; mwbuf <= true; mwbuf++) { - - if (MAINPROCESS) { - /* Print configuration message */ - printf("Testing for selection I/O "); - if (chunked) - printf("with chunked dataset, "); - else - printf("with contiguous dataset, "); - if (dtrans) - printf("data transform, "); - else - printf("without data transform, "); - if (mwbuf) - printf("and with modifying write buffers\n"); - else - printf("and without modifying write buffers\n"); - } + for (select = false; select <= true; select++) { + + /* Test with and without modify_write_buf turned on */ + for (mwbuf = false; mwbuf <= true; mwbuf++) { + + if (MAINPROCESS) { + /* Print configuration message */ + printf("Testing for selection I/O "); + if (chunked) + printf("with chunked dataset, "); + else + printf("with contiguous dataset, "); + if (dtrans) + printf("data transform, "); + else + printf("without data transform, "); + if (select) + printf("selection I/O ON, "); + else + printf("selection I/O OFF, "); + if (mwbuf) + printf("and with modifying write buffers\n"); + else + printf("and without modifying write buffers\n"); + } + + for (test_select_config = (int)TEST_NO_TYPE_CONV; + test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + + switch (test_select_config) { + + case TEST_NO_TYPE_CONV: /* case 1 */ + if (MAINPROCESS) + TESTING_2("No type conversion (null case)"); - for (test_select_config = (int)TEST_NO_TYPE_CONV; - test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + test_no_type_conv(fid, chunked, dtrans, select, mwbuf); - switch (test_select_config) { + break; - case TEST_NO_TYPE_CONV: /* case 1 */ - if (MAINPROCESS) - TESTING_2("No type conversion (null case)"); + case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ + if (MAINPROCESS) + TESTING_2("No size change, no background buffer"); - test_no_type_conv(fid, chunked, dtrans, mwbuf); + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + continue; + } - break; + test_no_size_change_no_bkg(fid, chunked, select, mwbuf); - case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ - if (MAINPROCESS) - TESTING_2("No size change, no background buffer"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_LARGER_MEM_NO_BKG: /* case 3 */ if (MAINPROCESS) - SKIPPED(); - continue; - } - - test_no_size_change_no_bkg(fid, chunked, mwbuf); + TESTING_2("Larger memory type, no background buffer"); - break; + test_larger_mem_type_no_bkg(fid, chunked, dtrans, select, mwbuf); - case TEST_LARGER_MEM_NO_BKG: /* case 3 */ - if (MAINPROCESS) - TESTING_2("Larger memory type, no background buffer"); - - test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf); - - break; + break; - case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ - if (MAINPROCESS) - TESTING_2("Smaller memory type, no background buffer"); + case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ + if (MAINPROCESS) + TESTING_2("Smaller memory type, no background buffer"); - test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf); + test_smaller_mem_type_no_bkg(fid, chunked, dtrans, select, mwbuf); - break; + break; - case TEST_CMPD_WITH_BKG: /* case 5 */ - if (MAINPROCESS) - TESTING_2("Compound types with background buffer"); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_CMPD_WITH_BKG: /* case 5 */ if 
(MAINPROCESS) - SKIPPED(); - continue; - } - - test_cmpd_with_bkg(fid, chunked, mwbuf); + TESTING_2("Compound types with background buffer"); + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + continue; + } - break; + test_cmpd_with_bkg(fid, chunked, select, mwbuf); - case TEST_TYPE_CONV_SEL_EMPTY: /* case 6 */ - if (MAINPROCESS) - TESTING_2("Empty selections + Type conversion"); + break; - test_type_conv_sel_empty(fid, chunked, dtrans, mwbuf); + case TEST_TYPE_CONV_SEL_EMPTY: /* case 6 */ + if (MAINPROCESS) + TESTING_2("Empty selections + Type conversion"); - break; + test_type_conv_sel_empty(fid, chunked, dtrans, select, mwbuf); - case TEST_MULTI_CONV_NO_BKG: /* case 7 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + no bkg buffer"); + break; - test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); + case TEST_MULTI_CONV_NO_BKG: /* case 7 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + no bkg buffer"); - break; + test_multi_dsets_no_bkg(fid, chunked, dtrans, select, mwbuf); - case TEST_MULTI_CONV_BKG: /* case 8 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + bkg buffer"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_MULTI_CONV_BKG: /* case 8 */ if (MAINPROCESS) - SKIPPED(); - } - else - test_multi_dsets_cmpd_with_bkg(fid, chunked, mwbuf); + TESTING_2("multi-datasets: type conv + bkg buffer"); - break; + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_cmpd_with_bkg(fid, chunked, select, mwbuf); - case TEST_MULTI_CONV_SIZE_CHANGE: /* case 9 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_MULTI_CONV_SIZE_CHANGE: /* case 9 */ if (MAINPROCESS) - SKIPPED(); - } - else - test_multi_dsets_size_change_no_bkg(fid, chunked, mwbuf); + TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); - break; + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_size_change_no_bkg(fid, chunked, select, mwbuf); - case TEST_MULTI_CONV_SEL_EMPTY: /* case 10 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + empty selections"); + break; - test_multi_dsets_conv_sel_empty(fid, chunked, dtrans, mwbuf); + case TEST_MULTI_CONV_SEL_EMPTY: /* case 10 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + empty selections"); - break; + test_multi_dsets_conv_sel_empty(fid, chunked, dtrans, select, mwbuf); - case TEST_MULTI_ALL: /* case 11 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_MULTI_ALL: /* case 11 */ if (MAINPROCESS) - SKIPPED(); - } - else - test_multi_dsets_all(2, fid, chunked, mwbuf); + TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_all(2, fid, chunked, select, mwbuf); - break; + break; - case TEST_SELECT_NTESTS: - default: - P_TEST_ERROR; - break; + case TEST_SELECT_NTESTS: + default: + 
P_TEST_ERROR; + break; - } /* end switch */ + } /* end switch */ - } /* end for test_select_config */ + } /* end for test_select_config */ - } /* end mwbuf */ + } /* end mwbuf */ - } /* end dtrans */ - } /* end chunked */ + } /* end select */ + } /* end dtrans */ + } /* end chunked */ if (H5Fclose(fid) < 0) P_TEST_ERROR; From b5c63fb3fe6ee04b41d80368b406b5d45502ba00 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 20 Oct 2023 12:57:09 -0500 Subject: [PATCH 032/101] Test scripts now execute in-source with creation of tmp dir (#3723) Fixes a few issues created in #3580: * Fixes a problem where committed tools test files were deleted when cleaning after an in-source build * Fixes issues with test file paths in Autotools tools test scripts --- CMakeInstallation.cmake | 2 +- CMakePresets.json | 2 +- tools/test/h5copy/testh5copy.sh.in | 2 +- tools/test/h5diff/h5diff_plugin.sh.in | 2 +- tools/test/h5diff/testh5diff.sh.in | 2 +- tools/test/h5dump/h5dump_plugin.sh.in | 2 +- tools/test/h5dump/testh5dump.sh.in | 2 +- tools/test/h5dump/testh5dumppbits.sh.in | 4 ++-- tools/test/h5dump/testh5dumpvds.sh.in | 4 ++-- tools/test/h5dump/testh5dumpxml.sh.in | 2 +- tools/test/h5format_convert/CMakeTests.cmake | 8 ++++---- tools/test/h5format_convert/expected/h5fc_ext1_f.ddl | 2 +- tools/test/h5format_convert/expected/h5fc_ext1_i.ddl | 2 +- tools/test/h5format_convert/expected/h5fc_ext1_s.ddl | 2 +- tools/test/h5format_convert/expected/h5fc_ext2_if.ddl | 2 +- tools/test/h5format_convert/expected/h5fc_ext2_is.ddl | 2 +- tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl | 2 +- .../test/h5format_convert/expected/h5fc_ext3_isf.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext1_f.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext1_i.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext1_s.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext2_if.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext2_is.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext2_sf.ddl | 2 +- .../h5format_convert/expected/old_h5fc_ext3_isf.ddl | 2 +- tools/test/h5format_convert/testh5fc.sh.in | 11 +++++++++-- tools/test/h5import/h5importtestutil.sh.in | 2 +- tools/test/h5jam/testh5jam.sh.in | 2 +- tools/test/h5ls/h5ls_plugin.sh.in | 2 +- tools/test/h5ls/testh5ls.sh.in | 2 +- tools/test/h5ls/testh5lsvds.sh.in | 4 ++-- tools/test/h5repack/h5repack.sh.in | 2 +- tools/test/h5repack/h5repack_plugin.sh.in | 2 +- tools/test/h5stat/testh5stat.sh.in | 2 +- tools/test/misc/testh5clear.sh.in | 2 +- tools/test/misc/testh5mkgrp.sh.in | 2 +- tools/test/misc/testh5repart.sh.in | 2 +- utils/tools/test/h5dwalk/copy_demo_files.sh.in | 2 +- utils/tools/test/h5dwalk/testh5dwalk.sh.in | 2 +- 39 files changed, 53 insertions(+), 46 deletions(-) diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index d42142a6da0..bb244764de2 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -151,9 +151,9 @@ if (HDF5_PACK_EXAMPLES) COMPONENT hdfdocuments ) - option (EXAMPLES_USE_RELEASE_NAME "Use the released examples artifact name" OFF) option (EXAMPLES_DOWNLOAD "Download to use released examples files" OFF) if (EXAMPLES_DOWNLOAD) + option (EXAMPLES_USE_RELEASE_NAME "Use the released examples artifact name" OFF) if (EXAMPLES_USE_RELEASE_NAME) set (EXAMPLES_NAME ${EXAMPLES_TGZ_ORIGNAME}) else () diff --git a/CMakePresets.json b/CMakePresets.json index 7b70970b17b..48393df02ae 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -91,7 +91,7 @@ "HDF5_EXAMPLES_COMPRESSED": {"type": 
"STRING", "value": "hdf5-examples-master.tar.gz"}, "HDF5_EXAMPLES_COMPRESSED_DIR": {"type": "PATH", "value": "${sourceParentDir}/temp"}, "EXAMPLES_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5-examples/releases/download/snapshot"}, - "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "snapshot.tar.gz"} + "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "hdf5-examples-2.0.4.tar.gz"} } }, { diff --git a/tools/test/h5copy/testh5copy.sh.in b/tools/test/h5copy/testh5copy.sh.in index f2c4a8e1237..916e2bec3e9 100644 --- a/tools/test/h5copy/testh5copy.sh.in +++ b/tools/test/h5copy/testh5copy.sh.in @@ -84,7 +84,7 @@ nerrors=0 verbose=yes h5haveexitcode=yes # default is yes -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR # RUNSERIAL is used. Check if it can return exit code from executalbe correctly. diff --git a/tools/test/h5diff/h5diff_plugin.sh.in b/tools/test/h5diff/h5diff_plugin.sh.in index 525503d68eb..f6783af8ff9 100644 --- a/tools/test/h5diff/h5diff_plugin.sh.in +++ b/tools/test/h5diff/h5diff_plugin.sh.in @@ -47,7 +47,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5DIFF_TESTFILES="$SRC_TOOLS/test/h5diff/testfiles" SRC_H5DIFF_OUTFILES="$SRC_TOOLS/test/h5diff/expected" -TESTDIR=./testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5diff/testh5diff.sh.in b/tools/test/h5diff/testh5diff.sh.in index efaac2e4082..1378f07cf34 100644 --- a/tools/test/h5diff/testh5diff.sh.in +++ b/tools/test/h5diff/testh5diff.sh.in @@ -49,7 +49,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5DIFF_OUTFILES="$SRC_TOOLS/test/h5diff/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5dump/h5dump_plugin.sh.in b/tools/test/h5dump/h5dump_plugin.sh.in index d080c1da6a8..c9e485d565f 100644 --- a/tools/test/h5dump/h5dump_plugin.sh.in +++ b/tools/test/h5dump/h5dump_plugin.sh.in @@ -48,7 +48,7 @@ SRC_TOOLS="$srcdir/../.." 
SRC_H5DUMP_TESTFILES="$SRC_TOOLS/test/h5dump/testfiles" SRC_H5DUMP_OUTFILES="$SRC_TOOLS/test/h5dump/expected" -TESTDIR=./testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in index 8796aa91fa7..fdeb17d29aa 100644 --- a/tools/test/h5dump/testh5dump.sh.in +++ b/tools/test/h5dump/testh5dump.sh.in @@ -66,7 +66,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/std +TESTDIR=./tmp test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR diff --git a/tools/test/h5dump/testh5dumppbits.sh.in b/tools/test/h5dump/testh5dumppbits.sh.in index 4094bfc8da7..e90cc86e2ee 100644 --- a/tools/test/h5dump/testh5dumppbits.sh.in +++ b/tools/test/h5dump/testh5dumppbits.sh.in @@ -58,7 +58,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/pbits +TESTDIR=./tmpbits test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR @@ -187,7 +187,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=$SRC_H5DUMP_TESTFILES/pbits + SDIR=$SRC_H5DUMP_TESTFILES INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then diff --git a/tools/test/h5dump/testh5dumpvds.sh.in b/tools/test/h5dump/testh5dumpvds.sh.in index e09e429eb39..2bd38dc8a32 100644 --- a/tools/test/h5dump/testh5dumpvds.sh.in +++ b/tools/test/h5dump/testh5dumpvds.sh.in @@ -58,7 +58,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/vds +TESTDIR=./tmpvds test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR @@ -166,7 +166,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=$SRC_H5DUMP_TESTFILES/vds + SDIR=$SRC_H5DUMP_TESTFILES INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then diff --git a/tools/test/h5dump/testh5dumpxml.sh.in b/tools/test/h5dump/testh5dumpxml.sh.in index b433fa210e3..880fc6be340 100644 --- a/tools/test/h5dump/testh5dumpxml.sh.in +++ b/tools/test/h5dump/testh5dumpxml.sh.in @@ -49,7 +49,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5DUMP_OUTFILES="$SRC_TOOLS/test/h5dump/expected" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/xml +TESTDIR=./tmpxml test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR diff --git a/tools/test/h5format_convert/CMakeTests.cmake b/tools/test/h5format_convert/CMakeTests.cmake index 3046521a365..ab14a8c09a6 100644 --- a/tools/test/h5format_convert/CMakeTests.cmake +++ b/tools/test/h5format_convert/CMakeTests.cmake @@ -378,11 +378,11 @@ COMMAND "${CMAKE_COMMAND}" -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}" -D "TEST_PROGRAM=$" - -D "TEST_ARGS:STRING=-BH;./testfiles/${testname}-tmp.h5" - -D "TEST_FOLDER=${PROJECT_BINARY_DIR}" - -D "TEST_OUTPUT=testfiles/${testname}_chk.out" + -D 
"TEST_ARGS:STRING=-BH;${testname}-tmp.h5" + -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles" + -D "TEST_OUTPUT=${testname}_chk.out" -D "TEST_EXPECT=0" - -D "TEST_REFERENCE=testfiles/${testname}.ddl" + -D "TEST_REFERENCE=${testname}.ddl" -P "${HDF_RESOURCES_DIR}/runTest.cmake" ) set_tests_properties (H5FC_H5DUMP_CHECK-${testname}-dump PROPERTIES diff --git a/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl b/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl index f78891384db..98806a730c4 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext1_f-tmp.h5" { +HDF5 "h5fc_ext1_f-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl b/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl index 65640314077..03729100837 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext1_i-tmp.h5" { +HDF5 "h5fc_ext1_i-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl b/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl index 746de2b1d7d..f97ebf334f6 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext1_s-tmp.h5" { +HDF5 "h5fc_ext1_s-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl b/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl index 57781ecdada..003defcfdef 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext2_if-tmp.h5" { +HDF5 "h5fc_ext2_if-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl b/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl index 8fd061d5c78..6b5d0d624c3 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext2_is-tmp.h5" { +HDF5 "h5fc_ext2_is-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl b/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl index 435ed464384..47e8c3bd0da 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext2_sf-tmp.h5" { +HDF5 "h5fc_ext2_sf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl b/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl index 57a78d3398e..bd7058ab2ea 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext3_isf-tmp.h5" { +HDF5 "h5fc_ext3_isf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl index 45fa3fbd438..420191bc301 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl +++ 
b/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext1_f-tmp.h5" { +HDF5 "old_h5fc_ext1_f-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl index 67a71164ac8..1ea166339ca 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext1_i-tmp.h5" { +HDF5 "old_h5fc_ext1_i-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 1 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl index 7f67d9f72a9..eeaece7af78 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext1_s-tmp.h5" { +HDF5 "old_h5fc_ext1_s-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl index 350d3ba4eb3..40a7f69f34e 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext2_if-tmp.h5" { +HDF5 "old_h5fc_ext2_if-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl index 6b2b2c366fa..d83042ae451 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext2_is-tmp.h5" { +HDF5 "old_h5fc_ext2_is-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl index 4a038e381d9..4cc7b2f5f58 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext2_sf-tmp.h5" { +HDF5 "old_h5fc_ext2_sf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl index 602627f4614..e3bb99437e6 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext3_isf-tmp.h5" { +HDF5 "old_h5fc_ext3_isf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/testh5fc.sh.in b/tools/test/h5format_convert/testh5fc.sh.in index 4ba46cbe88c..756156ebb02 100644 --- a/tools/test/h5format_convert/testh5fc.sh.in +++ b/tools/test/h5format_convert/testh5fc.sh.in @@ -62,7 +62,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5FORMCONV_TESTFILES="$SRC_TOOLS/test/h5format_convert/testfiles" SRC_H5FORMCONV_OUTFILES="$SRC_TOOLS/test/h5format_convert/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR # Copy the testfile to a temporary file for testing as h5format_convert is changing the file in place @@ -419,8 +419,15 @@ H5DUMP_CHECK() { 
actual="$TESTDIR/`basename $2 .ddl`.out" actual_err="$TESTDIR/`basename $2 .ddl`.err" testfile="`basename $2 .ddl`-tmp.h5" - $RUNSERIAL $H5DUMP_BIN -BH $TESTDIR/$testfile > $actual 2>$actual_err + # Run test. + ( + cd $TESTDIR + $RUNSERIAL $H5DUMP_BIN -BH $testfile + ) >$actual 2>$actual_err cat $actual_err >> $actual + cp $actual $actual_sav + cp $actual_err $actual_err_sav + STDERR_FILTER $actual_err # Compare output COMPARE_OUT $expect $actual diff --git a/tools/test/h5import/h5importtestutil.sh.in b/tools/test/h5import/h5importtestutil.sh.in index 65b899fc9c4..04582ee87c2 100644 --- a/tools/test/h5import/h5importtestutil.sh.in +++ b/tools/test/h5import/h5importtestutil.sh.in @@ -52,7 +52,7 @@ SRC_H5JAM_TESTFILES="$SRC_TOOLS/test/h5jam/testfiles" SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5jam/testh5jam.sh.in b/tools/test/h5jam/testh5jam.sh.in index 49598da07d0..ee34377047d 100644 --- a/tools/test/h5jam/testh5jam.sh.in +++ b/tools/test/h5jam/testh5jam.sh.in @@ -55,7 +55,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5JAM_OUTFILES="$SRC_TOOLS/test/h5jam/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5ls/h5ls_plugin.sh.in b/tools/test/h5ls/h5ls_plugin.sh.in index 3408876e0b0..8b606d6ff29 100644 --- a/tools/test/h5ls/h5ls_plugin.sh.in +++ b/tools/test/h5ls/h5ls_plugin.sh.in @@ -49,7 +49,7 @@ SRC_H5LS_TESTFILES="$SRC_TOOLS/test/h5ls/testfiles" SRC_H5DUMP_TESTFILES="$SRC_TOOLS/test/h5dump/testfiles" SRC_H5LS_OUTFILES="$SRC_TOOLS/test/h5ls/expected" -TESTDIR=./testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5ls/testh5ls.sh.in b/tools/test/h5ls/testh5ls.sh.in index fc6daab00ac..01f94024aa9 100644 --- a/tools/test/h5ls/testh5ls.sh.in +++ b/tools/test/h5ls/testh5ls.sh.in @@ -54,7 +54,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5LS_ERRFILES="$SRC_TOOLS/test/h5ls/errfiles" SRC_H5LS_OUTFILES="$SRC_TOOLS/test/h5ls/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5ls/testh5lsvds.sh.in b/tools/test/h5ls/testh5lsvds.sh.in index 2408ee29466..9038cf18ddd 100644 --- a/tools/test/h5ls/testh5lsvds.sh.in +++ b/tools/test/h5ls/testh5lsvds.sh.in @@ -51,7 +51,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5LS_OUTFILES="$SRC_TOOLS/test/h5ls/expected" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/vds +TESTDIR=./tmpvds test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir $TESTDIR @@ -145,7 +145,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=$SRC_H5LS_TESTFILES/vds + SDIR=$SRC_H5LS_TESTFILES INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then diff --git a/tools/test/h5repack/h5repack.sh.in b/tools/test/h5repack/h5repack.sh.in index f3b8d3eb9b9..ac213f1f4ba 100644 --- 
a/tools/test/h5repack/h5repack.sh.in +++ b/tools/test/h5repack/h5repack.sh.in @@ -68,7 +68,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5REPACK_OUTFILES="$SRC_TOOLS/test/h5repack/expected" -TESTDIR=./testpack +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5repack/h5repack_plugin.sh.in b/tools/test/h5repack/h5repack_plugin.sh.in index 43be1ee1c7b..a39cd7b967f 100644 --- a/tools/test/h5repack/h5repack_plugin.sh.in +++ b/tools/test/h5repack/h5repack_plugin.sh.in @@ -49,7 +49,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5REPACK_TESTFILES="$SRC_TOOLS/test/h5repack/testfiles" SRC_H5REPACK_OUTFILES="$SRC_TOOLS/test/h5repack/expected" -TESTDIR=testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in index 7ce0ad495e3..1d732c600ae 100644 --- a/tools/test/h5stat/testh5stat.sh.in +++ b/tools/test/h5stat/testh5stat.sh.in @@ -51,7 +51,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5STAT_OUTFILES="$SRC_TOOLS/test/h5stat/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/misc/testh5clear.sh.in b/tools/test/misc/testh5clear.sh.in index b5bf5cc3d83..2306d589938 100644 --- a/tools/test/misc/testh5clear.sh.in +++ b/tools/test/misc/testh5clear.sh.in @@ -44,7 +44,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5CLEAR_TESTFILES="$SRC_TOOLS/test/misc/testfiles" SRC_H5CLEAR_OUTFILES="$SRC_TOOLS/test/misc/expected" -TESTDIR=./testh5clear +TESTDIR=./tmpclr test -d $TESTDIR || mkdir -p $TESTDIR ###################################################################### diff --git a/tools/test/misc/testh5mkgrp.sh.in b/tools/test/misc/testh5mkgrp.sh.in index 676f6b04e55..297f89f9f19 100644 --- a/tools/test/misc/testh5mkgrp.sh.in +++ b/tools/test/misc/testh5mkgrp.sh.in @@ -43,7 +43,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5MKGRP_TESTFILES="$SRC_TOOLS/test/misc/testfiles" SRC_H5MKGRP_OUTFILES="$SRC_TOOLS/test/misc/expected" -TESTDIR=./testgrp +TESTDIR=./tmpmkg test -d $TESTDIR || mkdir -p $TESTDIR ###################################################################### diff --git a/tools/test/misc/testh5repart.sh.in b/tools/test/misc/testh5repart.sh.in index addd0db9e22..e101b088067 100644 --- a/tools/test/misc/testh5repart.sh.in +++ b/tools/test/misc/testh5repart.sh.in @@ -40,7 +40,7 @@ SRC_TOOLS="$srcdir/../.." 
SRC_H5REPART_TESTFILES="$SRC_TOOLS/test/misc/testfiles" -TESTDIR=./testrepart +TESTDIR=./tmprp test -d $TESTDIR || mkdir -p $TESTDIR # diff --git a/utils/tools/test/h5dwalk/copy_demo_files.sh.in b/utils/tools/test/h5dwalk/copy_demo_files.sh.in index 02df202ccf9..8ccc5e8b678 100644 --- a/utils/tools/test/h5dwalk/copy_demo_files.sh.in +++ b/utils/tools/test/h5dwalk/copy_demo_files.sh.in @@ -30,7 +30,7 @@ exit_code=$EXIT_SUCCESS # Add Testing files into the local testfiles directory:: -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR echo "HDF5 \"$THIS_DIR/testfiles/h5diff_basic1.h5\" {" > "$THIS_DIR"/testfiles/h5diff_basic1.h5_h5dump.txt diff --git a/utils/tools/test/h5dwalk/testh5dwalk.sh.in b/utils/tools/test/h5dwalk/testh5dwalk.sh.in index a123f8d98cb..694dad01e3b 100644 --- a/utils/tools/test/h5dwalk/testh5dwalk.sh.in +++ b/utils/tools/test/h5dwalk/testh5dwalk.sh.in @@ -68,7 +68,7 @@ SRC_H5JAM_TESTFILES="$SRC_TOOLS/h5jam/testfiles" SRC_H5DWALK_TESTFILES="$SRC_TOOLS/h5dwalk/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/h5import/testfiles" -TESTDIR=./testfiles +TESTDIR=./tmpdw test -d $TESTDIR || mkdir $TESTDIR echo "SRC_H5DIFF_TESTFILES = $SRC_H5DIFF_TESTFILES" From 2d551c9b57a176dd23654e42e32d42f8e5aeb21f Mon Sep 17 00:00:00 2001 From: Glenn Song <43005495+glennsong09@users.noreply.github.com> Date: Fri, 20 Oct 2023 13:28:10 -0500 Subject: [PATCH 033/101] Add -h and --help as flags in h5cc & h5fc (#3729) Adds these common help flags in addition to -help --- bin/h5cc.in | 22 ++++++++++++++-------- fortran/src/h5fc.in | 22 ++++++++++++++-------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/bin/h5cc.in b/bin/h5cc.in index 4eef3c95eee..e3dc988a576 100644 --- a/bin/h5cc.in +++ b/bin/h5cc.in @@ -116,15 +116,15 @@ usage() { # A wonderfully informative "usage" message. echo "usage: $prog_name [OPTIONS] " echo " OPTIONS:" - echo " -help This help message." - echo " -echo Show all the shell commands executed" - echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" - echo " subdirectories [default: $prefix]" - echo " -show Show the commands without executing them" - echo " -showconfig Show the HDF5 library configuration summary" - echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" + echo " -help | --help | -h This help message." + echo " -echo Show all the shell commands executed" + echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" + echo " subdirectories [default: $prefix]" + echo " -show Show the commands without executing them" + echo " -showconfig Show the HDF5 library configuration summary" + echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" echo " without static libraries]" - echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" + echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" echo " with static libraries]" echo " " echo " - the normal compile line options for your compiler." @@ -256,6 +256,12 @@ for arg in $@ ; do -help) usage ;; + --help) + usage + ;; + -h) + usage + ;; *\"*) qarg="'"$arg"'" allargs="$allargs $qarg" diff --git a/fortran/src/h5fc.in b/fortran/src/h5fc.in index b793648d854..c5da815f3f6 100644 --- a/fortran/src/h5fc.in +++ b/fortran/src/h5fc.in @@ -110,15 +110,15 @@ usage() { # A wonderfully informative "usage" message. echo "usage: $prog_name [OPTIONS] " echo " OPTIONS:" - echo " -help This help message." 
- echo " -echo Show all the shell commands executed" - echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" - echo " subdirectories [default: $prefix]" - echo " -show Show the commands without executing them" - echo " -showconfig Show the HDF5 library configuration summary" - echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" + echo " -help | --help | -h This help message." + echo " -echo Show all the shell commands executed" + echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" + echo " subdirectories [default: $prefix]" + echo " -show Show the commands without executing them" + echo " -showconfig Show the HDF5 library configuration summary" + echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" echo " without static libraries]" - echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" + echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" echo " with static libraries]" echo " " echo " - the normal compile line options for your compiler." @@ -230,6 +230,12 @@ for arg in $@ ; do -help) usage ;; + --help) + usage + ;; + -h) + usage + ;; *\"*) qarg="'"$arg"'" allargs="$allargs $qarg" From 709367cb1a0d727d0e3fe07471c07cb184dd0b44 Mon Sep 17 00:00:00 2001 From: bmribler <39579120+bmribler@users.noreply.github.com> Date: Fri, 20 Oct 2023 15:17:46 -0400 Subject: [PATCH 034/101] Update the library version matrix for H5Pset_libver_bounds() (#3702) * Fixed #3524 Added 1.12, 1.14, and 1.16 to the table for libver bounds in the H5Pset_libver_bounds docs. --- src/H5Ppublic.h | 308 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 288 insertions(+), 20 deletions(-) diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index d822925bc0d..d79c7d785fd 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -4837,9 +4837,8 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * enumerated value in #H5F_libver_t, indicating that this is * currently the latest format available. * - * The library supports the following five pairs of - * (\p low, \p high) combinations as derived from the values - * in #H5F_libver_t: + * The library supports the following pairs of (\p low, \p high) + * combinations as derived from the values in #H5F_libver_t: * * * @@ -4866,11 +4865,53 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \li The library will create objects with the earliest possible * format versions. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. - * Since 1.10.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. For example, if a newer - * format version is required to support a feature e.g. virtual - * dataset, this setting will allow the object to be created. + * format versions available to library release 1.10.x. Note + * that as 1.10.11 is the last release of the 1.10 series. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.10.x + * release will fail. + * + * + * + * + * + * + * + * + * + * + * + * + * @@ -4894,11 +4935,54 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \li The library will create objects with the latest format * versions available to library release 1.8.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. 
- * Since 1.10.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. For example, if a - * newer format version is required to support a feature e.g. - * virtual dataset, this setting will allow the object to be + * format versions available to library release 1.10.x. Note + * that 1.10.11 is the last release of the 1.10 series. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.10.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. + * + * + * + * + * + * + * + * + * + * + * + * @@ -4911,11 +4995,196 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \li The library will create objects with the latest format * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. - * Since 1.10.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. For example, if a - * newer format version is required to support a feature e.g. - * virtual dataset, this setting will allow the object to be + * format versions available to library release 1.10.x. Note + * that 1.10.11 is the last release of the 1.10 series. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.10.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * * *
\p low=#H5F_LIBVER_EARLIEST
+ * \p high=#H5F_LIBVER_V112
+ * \li The library will create objects with the earliest possible + * format versions. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. Note + * that as 1.12.3 is the last release of the 1.12 series. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.12.x + * release will fail. + *
\p low=#H5F_LIBVER_EARLIEST
+ * \p high=#H5F_LIBVER_V114
+ * \li The library will create objects with the earliest possible + * format versions. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.14.x + * release will fail. + *
\p low=#H5F_LIBVER_EARLIEST
+ * \p high=#H5F_LIBVER_V116
+ * \li The library will create objects with the earliest possible + * format versions. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. + * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be + * created. * \li This is the library default setting and provides the greatest * format compatibility. *
\p low=#H5F_LIBVER_V18
+ * \p high=#H5F_LIBVER_V112
+ * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.12.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V18
+ * \p high=#H5F_LIBVER_V114
+ * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.14.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V18
+ * \p high=#H5F_LIBVER_V116
+ * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. + * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be * created. * \li Earlier versions of the library may not be able to access * objects created with this setting.
\p low=#H5F_LIBVER_V110
+ * \p high=#H5F_LIBVER_V112 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.10.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.12.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V110
+ * \p high=#H5F_LIBVER_V114 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.10.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.14.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V110
+ * \p high=#H5F_LIBVER_V116 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.10.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. + * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be + * created. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * However, objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. + *
\p low=#H5F_LIBVER_V112
+ * \p high=#H5F_LIBVER_V112 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.12.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.12.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V112
+ * \p high=#H5F_LIBVER_V114 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.12.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.14.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V112
+ * \p high=#H5F_LIBVER_V116 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.12.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. + * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be + * created. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * However, objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. + *
\p low=#H5F_LIBVER_V114
+ * \p high=#H5F_LIBVER_V114 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.14.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.14.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
\p low=#H5F_LIBVER_V114
+ * \p high=#H5F_LIBVER_V116 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.14.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. + * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be + * created. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * However, objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. + *
\p low=#H5F_LIBVER_V116
+ * \p high=#H5F_LIBVER_V116 + *
+ * \li The library will create objects with the latest format + * versions available to library release 1.16.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. + * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be * created. * \li This setting allows users to take advantage of the latest * features and performance enhancements in the library. @@ -4923,8 +5192,7 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * accessible to a smaller range of library versions than * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li Earlier versions of the library may not be able to access - * objects created with this - * setting. + * objects created with this setting. *
From e01ea706e21b323d17b7b5c6cdaad22602b5573c Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 20 Oct 2023 12:46:26 -0700 Subject: [PATCH 035/101] Add missing test files to distclean target (#3734) Cleans up new files in Autotools `make distclean` in the test directory --- test/Makefile.am | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/Makefile.am b/test/Makefile.am index 291907ca0f9..9fd7b94625c 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -192,7 +192,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 mdset.h5 compact_dataset.h5 dataset.h5 d storage_size.h5 dls_01_strings.h5 power2up.h5 version_bounds.h5 \ alloc_0sized.h5 h5s_block.h5 h5s_plist.h5 \ extend.h5 istore.h5 extlinks*.h5 frspace.h5 links*.h5 \ - sys_file1 tfile[1-7].h5 th5s[1-4].h5 lheap.h5 fheap.h5 ohdr.h5 \ + sys_file1 tfile[1-8].h5 th5s[1-4].h5 lheap.h5 fheap.h5 ohdr.h5 \ stab.h5 extern_[1-5].h5 extern_[1-4][rw].raw gheap[0-4].h5 \ ohdr_min_a.h5 ohdr_min_b.h5 min_dset_ohdr_testfile.h5 \ dt_arith[1-2] links.h5 links[0-6]*.h5 extlinks[0-15].h5 \ @@ -226,7 +226,10 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 mdset.h5 compact_dataset.h5 dataset.h5 d test_swmr*.h5 cache_logging.h5 cache_logging.out vds_swmr.h5 vds_swmr_src_*.h5 \ swmr[0-2].h5 swmr_writer.out swmr_writer.log.* swmr_reader.out.* swmr_reader.log.* \ tbogus.h5.copy cache_image_test.h5 direct_chunk.h5 native_vol_test.h5 \ - splitter*.h5 splitter.log mirror_rw mirror_ro event_set_[0-9].h5 + splitter*.h5 splitter.log mirror_rw mirror_ro event_set_[0-9].h5 \ + cmpd_dtransform.h5 single_latest.h5 source_file.h5 stdio_file.h5 \ + tfile_is_accessible.h5 tfile_is_accessible_non_hdf5.h5 tverbounds_dtype.h5 \ + virtual_file1.h5 # Sources for testhdf5 executable testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \ From 98b5779ea54edd011d1d3197117ed0571ec7ad6c Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Fri, 20 Oct 2023 13:50:57 -0700 Subject: [PATCH 036/101] Add tools/libtest to Autotools builds (#3735) This was only added to CMake many years ago and tests the tools library. --- hl/test/Makefile.am | 2 +- tools/Makefile.am | 2 +- tools/libtest/Makefile.am | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hl/test/Makefile.am b/hl/test/Makefile.am index 1d1cb0f92fb..6f66291b25d 100644 --- a/hl/test/Makefile.am +++ b/hl/test/Makefile.am @@ -20,7 +20,7 @@ include $(top_srcdir)/config/commence.am # Add include directories to C preprocessor flags AM_CPPFLAGS+=-I. -I$(srcdir) -I$(top_builddir)/src -I$(top_srcdir)/src -I$(top_builddir)/test -I$(top_srcdir)/test -I$(top_srcdir)/hl/src -# The tests depend on the hdf5, hdf5 test, and hdf5_hl libraries +# The tests depend on the hdf5, hdf5 test, and hdf5_hl libraries LDADD=$(LIBH5_HL) $(LIBH5TEST) $(LIBHDF5) # Test programs. These are our main targets. 
They should be listed in the diff --git a/tools/Makefile.am b/tools/Makefile.am index 7db4040d0eb..d0a6c5c5bc4 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -19,7 +19,7 @@ include $(top_srcdir)/config/commence.am if BUILD_TESTS_CONDITIONAL - TESTSERIAL_DIR =test + TESTSERIAL_DIR=libtest test else TESTSERIAL_DIR= endif diff --git a/tools/libtest/Makefile.am b/tools/libtest/Makefile.am index 835667c74b0..45b3f476df7 100644 --- a/tools/libtest/Makefile.am +++ b/tools/libtest/Makefile.am @@ -19,11 +19,11 @@ include $(top_srcdir)/config/commence.am -# Include src and tools/lib directories -AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib +# Include src, test, and tools/lib directories +AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test -I$(top_srcdir)/tools/lib -# All programs depend on the hdf5 and h5tools libraries -LDADD=$(LIBH5TOOLS) $(LIBHDF5) +# All programs depend on the hdf5, hdf5 test, and h5tools libraries +LDADD=$(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5) # main target From 7843db55ff046ba2d044647add5f1861bfdd33bc Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 21 Oct 2023 11:44:03 -0700 Subject: [PATCH 037/101] Clean up onion VFD files in tools `make clean` (#3739) Cleans up h5dump and h5diff *.onion files in the Autotools when runing `make clean`. --- tools/test/h5diff/Makefile.am | 3 +-- tools/test/h5dump/Makefile.am | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/test/h5diff/Makefile.am b/tools/test/h5diff/Makefile.am index b561d722275..f920afab74a 100644 --- a/tools/test/h5diff/Makefile.am +++ b/tools/test/h5diff/Makefile.am @@ -60,8 +60,7 @@ endif # Temporary files. *.h5 are generated by h5diff. They should # be copied to the testfiles/ directory if update is required -CHECK_CLEANFILES+=*.h5 expect_sorted actual_sorted - +CHECK_CLEANFILES+=*.h5 *.onion expect_sorted actual_sorted DISTCLEANFILES=testh5diff.sh h5diff_plugin.sh include $(top_srcdir)/config/conclude.am diff --git a/tools/test/h5dump/Makefile.am b/tools/test/h5dump/Makefile.am index a79b0fe8b73..619647c670e 100644 --- a/tools/test/h5dump/Makefile.am +++ b/tools/test/h5dump/Makefile.am @@ -45,7 +45,7 @@ endif # Temporary files. *.h5 are generated by h5dumpgentest. They should # copied to the testfiles/ directory if update is required. -CHECK_CLEANFILES+=*.h5 *.bin +CHECK_CLEANFILES+=*.h5 *.bin *.onion DISTCLEANFILES=testh5dump.sh testh5dumppbits.sh testh5dumpxml.sh h5dump_plugin.sh include $(top_srcdir)/config/conclude.am From 29b27b77619d7260aa39e45e1c04369f32f094ca Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 21 Oct 2023 11:44:16 -0700 Subject: [PATCH 038/101] Clean Java test files on Autotools (#3740) Removes generated HDF5 and text output files when running `make clean`. 
--- java/test/Makefile.am | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/java/test/Makefile.am b/java/test/Makefile.am index 9f39be9ca1c..7f6ab0169a8 100644 --- a/java/test/Makefile.am +++ b/java/test/Makefile.am @@ -90,7 +90,8 @@ noinst_DATA = $(jarfile) check_SCRIPTS = junit.sh TEST_SCRIPT = $(check_SCRIPTS) -CLEANFILES = classnoinst.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/*.class junit.sh +CLEANFILES = classnoinst.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/*.class junit.sh \ + *.h5 testExport*.txt clean: rm -rf $(JAVAROOT)/* From 4b2d2eccdd8be50814ca2ed5e37e6d77b5a50340 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 21 Oct 2023 11:44:33 -0700 Subject: [PATCH 039/101] Clean the flushrefresh test dir on Autotools (#3741) The flushrefresh_test directory was not being cleaned up w/ `make clean` under the Autotools --- test/Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/Makefile.am b/test/Makefile.am index 9fd7b94625c..3146e0174d0 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -229,7 +229,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 mdset.h5 compact_dataset.h5 dataset.h5 d splitter*.h5 splitter.log mirror_rw mirror_ro event_set_[0-9].h5 \ cmpd_dtransform.h5 single_latest.h5 source_file.h5 stdio_file.h5 \ tfile_is_accessible.h5 tfile_is_accessible_non_hdf5.h5 tverbounds_dtype.h5 \ - virtual_file1.h5 + virtual_file1.h5 flushrefresh_test # Sources for testhdf5 executable testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \ From 4dfde6bac7395386379072c532560fbe8a5b6f79 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sat, 21 Oct 2023 17:08:12 -0700 Subject: [PATCH 040/101] Fix file names in tfile.c (#3743) Some tests in tfile.c use h5_fileaccess to get a VFD-dependent file name but use the scheme from testhdf5, reusing the FILE1 and FILE8 names. This leads to files like test1.h5.h5 which are unintended and not cleaned up. This changes the filename scheme for a few tests to work with h5test, resulting in more informative names and allowing the files to be cleaned up at the end of the test. The test files have also been added to the `make clean` target for the Autotools. 
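For illustration, the naming pattern this patch adopts reduces to the sketch below. The prefix FILE_EXAMPLE and the empty test body are hypothetical; the h5test helpers (h5_fileaccess, h5_fixname, h5_delete_test_file) are used the same way as in the tfile.c diff that follows.

    #include "hdf5.h"
    #include "h5test.h"

    /* Hypothetical bare prefix; h5_fixname() expands it into a
     * VFD-dependent on-disk name such as "tfile_example.h5".
     */
    #define FILE_EXAMPLE "tfile_example"

    static void
    example_test(void)
    {
        char  filename[1024];
        hid_t fapl = h5_fileaccess();
        hid_t fid  = H5I_INVALID_HID;

        /* Build the real on-disk name from the bare prefix */
        h5_fixname(FILE_EXAMPLE, fapl, filename, sizeof filename);

        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        /* ... exercise the file ... */

        H5Fclose(fid);

        /* Remove the file using the same bare prefix, then release the FAPL */
        h5_delete_test_file(FILE_EXAMPLE, fapl);
        H5Pclose(fapl);
    }

Using the same bare prefix for both h5_fixname() and h5_delete_test_file() keeps the generated name and the cleanup consistent across VFDs, which is what allows the files to be removed at the end of the test.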
--- test/Makefile.am | 2 +- test/tfile.c | 21 +++++++++++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/test/Makefile.am b/test/Makefile.am index 3146e0174d0..fdd83e5bdeb 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -229,7 +229,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 mdset.h5 compact_dataset.h5 dataset.h5 d splitter*.h5 splitter.log mirror_rw mirror_ro event_set_[0-9].h5 \ cmpd_dtransform.h5 single_latest.h5 source_file.h5 stdio_file.h5 \ tfile_is_accessible.h5 tfile_is_accessible_non_hdf5.h5 tverbounds_dtype.h5 \ - virtual_file1.h5 flushrefresh_test + virtual_file1.h5 tfile_double_open.h5 tfile_incr_filesize.h5 flushrefresh_test # Sources for testhdf5 executable testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \ diff --git a/test/tfile.c b/test/tfile.c index 1c5196acf60..24cc7ce000e 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -138,9 +138,15 @@ #define NGROUPS 2 #define NDSETS 4 -/* Declaration for test_incr_filesize() */ +/* Declaration for libver bounds tests */ #define FILE8 "tfile8.h5" /* Test file */ +/* Declaration for test_file_double_file_dataset_open() */ +#define FILE_DOUBLE_OPEN "tfile_double_open" + +/* Declaration for test_incr_filesize() */ +#define FILE_INCR_FILESIZE "tfile_incr_filesize" + /* Files created under 1.6 branch and 1.8 branch--used in test_filespace_compatible() */ static const char *OLD_FILENAME[] = { "filespace_1_6.h5", /* 1.6 HDF5 file */ @@ -2623,8 +2629,8 @@ test_file_double_file_dataset_open(bool new_format) if (new_format) { ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); CHECK(ret, FAIL, "H5Pset_libver_bounds"); - } /* end if */ - h5_fixname(FILE1, fapl, filename, sizeof filename); + } + h5_fixname(FILE_DOUBLE_OPEN, fapl, filename, sizeof filename); /* Create the test file */ fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); @@ -2934,6 +2940,9 @@ test_file_double_file_dataset_open(bool new_format) ret = H5Tclose(tid1); CHECK(ret, FAIL, "H5Tclose"); + /* Delete the test file */ + h5_delete_test_file(filename, fapl); + /* Close FAPL */ ret = H5Pclose(fapl); CHECK(ret, FAIL, "H5Pclose"); @@ -7650,7 +7659,7 @@ test_incr_filesize(void) MESSAGE(5, ("Testing H5Fincrement_filesize() and H5Fget_eoa())\n")); fapl = h5_fileaccess(); - h5_fixname(FILE8, fapl, filename, sizeof filename); + h5_fixname(FILE_INCR_FILESIZE, fapl, filename, sizeof filename); /* Get the VFD feature flags */ driver_id = H5Pget_driver(fapl); @@ -7735,6 +7744,9 @@ test_incr_filesize(void) /* Verify the filesize is the previous stored_eoa + 512 */ VERIFY(filesize, stored_eoa + 512, "file size"); + /* Delete the test file */ + h5_delete_test_file(FILE_INCR_FILESIZE, fapl); + /* Close the file access property list */ ret = H5Pclose(fapl); CHECK(ret, FAIL, "H5Pclose"); @@ -8225,6 +8237,7 @@ cleanup_file(void) H5Fdelete(FILE5, H5P_DEFAULT); H5Fdelete(FILE6, H5P_DEFAULT); H5Fdelete(FILE7, H5P_DEFAULT); + H5Fdelete(FILE8, H5P_DEFAULT); H5Fdelete(DST_FILE, H5P_DEFAULT); } H5E_END_TRY From 21ec3730c5d625995cd406e29ae5cb84dc45a11c Mon Sep 17 00:00:00 2001 From: bmribler <39579120+bmribler@users.noreply.github.com> Date: Sat, 21 Oct 2023 20:15:12 -0400 Subject: [PATCH 041/101] Additional update to the library version matrix for H5Pset_libver_bounds() (#3742) This is the follow-up of PR #3702. 
--- src/H5Ppublic.h | 232 ++++++++++++++++++------------------------------ 1 file changed, 87 insertions(+), 145 deletions(-) diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index d79c7d785fd..3a059105a11 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -4849,14 +4849,13 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \p low=#H5F_LIBVER_EARLIEST
* \p high=#H5F_LIBVER_V18 * - * \li The library will create objects with the earliest - * possible format versions. - * \li The library will allow objects to be created with the - * latest format versions available to library release 1.8.x. - * \li API calls that create objects or features that are - * available to versions of the library greater than 1.8.x - * release will fail. - * + * \li The library will create objects with the earliest + * possible format versions. + * \li The library will allow objects to be created with the + * latest format versions available to library release 1.8.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.8.x + * release will fail. * * * \p low=#H5F_LIBVER_EARLIEST
@@ -4865,12 +4864,10 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \li The library will create objects with the earliest possible * format versions. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. Note - * that as 1.10.11 is the last release of the 1.10 series. + * format versions available to library release 1.10.x. * \li API calls that create objects or features that are * available to versions of the library greater than 1.10.x - * release will fail. - * + * release will fail. * * * \p low=#H5F_LIBVER_EARLIEST
@@ -4879,12 +4876,10 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \li The library will create objects with the earliest possible * format versions. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.12.x. Note - * that as 1.12.3 is the last release of the 1.12 series. + * format versions available to library release 1.12.x. * \li API calls that create objects or features that are * available to versions of the library greater than 1.12.x - * release will fail. - * + * release will fail. * * * \p low=#H5F_LIBVER_EARLIEST
@@ -4896,8 +4891,7 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * format versions available to library release 1.14.x. * \li API calls that create objects or features that are * available to versions of the library greater than 1.14.x - * release will fail. - * + * release will fail. * * * \p low=#H5F_LIBVER_EARLIEST
@@ -4906,15 +4900,10 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \li The library will create objects with the earliest possible * format versions. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.16.x. - * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. That is, if a - * newer format version is required to support a feature - * in 1.16.x series, this setting will allow the object to be - * created. + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This is the library default setting and provides the greatest - * format compatibility. - * + * format compatibility. * * * \p low=#H5F_LIBVER_V18
@@ -4922,6 +4911,11 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * * \li The library will create objects with the latest format * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.8.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li API calls that create objects or features that are available * to versions of the library greater than 1.8.x release will * fail. @@ -4932,71 +4926,66 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \p low=#H5F_LIBVER_V18
* \p high=#H5F_LIBVER_V110 * - * \li The library will create objects with the latest format - * versions available to library release 1.8.x. - * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. Note - * that 1.10.11 is the last release of the 1.10 series. - * \li API calls that create objects or features that are - * available to versions of the library greater than 1.10.x - * release will fail. - * \li Earlier versions of the library may not be able to access - * objects created with this setting. + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.10.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.10.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. * * * \p low=#H5F_LIBVER_V18
* \p high=#H5F_LIBVER_V112 * - * \li The library will create objects with the latest format - * versions available to library release 1.8.x. - * \li The library will allow objects to be created with the latest - * format versions available to library release 1.12.x. - * \li API calls that create objects or features that are - * available to versions of the library greater than 1.12.x - * release will fail. - * \li Earlier versions of the library may not be able to access - * objects created with this setting. + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.12.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. * * * \p low=#H5F_LIBVER_V18
* \p high=#H5F_LIBVER_V114 * - * \li The library will create objects with the latest format - * versions available to library release 1.8.x. - * \li The library will allow objects to be created with the latest - * format versions available to library release 1.14.x. - * \li API calls that create objects or features that are - * available to versions of the library greater than 1.14.x - * release will fail. - * \li Earlier versions of the library may not be able to access - * objects created with this setting. + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.14.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. * * * \p low=#H5F_LIBVER_V18
* \p high=#H5F_LIBVER_V116 * - * \li The library will create objects with the latest format - * versions available to library release 1.8.x. - * \li The library will allow objects to be created with the latest - * format versions available to library release 1.16.x. - * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. That is, if a - * newer format version is required to support a feature - * in 1.16.x series, this setting will allow the object to be - * created. - * \li Earlier versions of the library may not be able to access - * objects created with this setting. + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * \li Earlier versions of the library may not be able to access + * objects created with this setting. * * * \p low=#H5F_LIBVER_V110
- * \p high=#H5F_LIBVER_V110 - * + * \p high=#H5F_LIBVER_V110 * * \li The library will create objects with the latest format * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. Note - * that 1.10.11 is the last release of the 1.10 series. + * format versions available to library release 1.10.x. * \li The objects written with this setting may be * accessible to a smaller range of library versions than * would be the case if low is set to #H5F_LIBVER_EARLIEST. @@ -5005,72 +4994,52 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting. - * * * * \p low=#H5F_LIBVER_V110
- * \p high=#H5F_LIBVER_V112 - * + * \p high=#H5F_LIBVER_V112 * * \li The library will create objects with the latest format * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest * format versions available to library release 1.12.x. - * \li The objects written with this setting may be - * accessible to a smaller range of library versions than - * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li API calls that create objects or features that are available * to versions of the library greater than 1.12.x release will * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting. - * * * * \p low=#H5F_LIBVER_V110
- * \p high=#H5F_LIBVER_V114 - * + * \p high=#H5F_LIBVER_V114 * * \li The library will create objects with the latest format * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest * format versions available to library release 1.14.x. - * \li The objects written with this setting may be - * accessible to a smaller range of library versions than - * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li API calls that create objects or features that are available * to versions of the library greater than 1.14.x release will * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting. - * * * * \p low=#H5F_LIBVER_V110
- * \p high=#H5F_LIBVER_V116 - * + * \p high=#H5F_LIBVER_V116 * * \li The library will create objects with the latest format * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.16.x. - * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. That is, if a - * newer format version is required to support a feature - * in 1.16.x series, this setting will allow the object to be - * created. + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This setting allows users to take advantage of the latest * features and performance enhancements in the library. - * However, objects written with this setting may be - * accessible to a smaller range of library versions than - * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li Earlier versions of the library may not be able to access - * objects created with this setting. - * + * objects created with this setting. + * * * \p low=#H5F_LIBVER_V112
- * \p high=#H5F_LIBVER_V112 - * + * \p high=#H5F_LIBVER_V112 * * \li The library will create objects with the latest format * versions available to library release 1.12.x. @@ -5084,55 +5053,38 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting. - * * * * \p low=#H5F_LIBVER_V112
- * \p high=#H5F_LIBVER_V114 - * + * \p high=#H5F_LIBVER_V114 * * \li The library will create objects with the latest format * versions available to library release 1.12.x. * \li The library will allow objects to be created with the latest * format versions available to library release 1.14.x. - * \li The objects written with this setting may be - * accessible to a smaller range of library versions than - * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li API calls that create objects or features that are available * to versions of the library greater than 1.14.x release will * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting. - * - * * * * \p low=#H5F_LIBVER_V112
- * \p high=#H5F_LIBVER_V116 - * + * \p high=#H5F_LIBVER_V116 * * \li The library will create objects with the latest format * versions available to library release 1.12.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.16.x. - * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. That is, if a - * newer format version is required to support a feature - * in 1.16.x series, this setting will allow the object to be - * created. + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This setting allows users to take advantage of the latest * features and performance enhancements in the library. - * However, objects written with this setting may be - * accessible to a smaller range of library versions than - * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li Earlier versions of the library may not be able to access - * objects created with this setting. - * + * objects created with this setting. * * * \p low=#H5F_LIBVER_V114
- * \p high=#H5F_LIBVER_V114 - * + * \p high=#H5F_LIBVER_V114 * * \li The library will create objects with the latest format * versions available to library release 1.14.x. @@ -5146,57 +5098,47 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting. - * - * * * * \p low=#H5F_LIBVER_V114
- * \p high=#H5F_LIBVER_V116 - * + * \p high=#H5F_LIBVER_V116 * * \li The library will create objects with the latest format * versions available to library release 1.14.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.16.x. - * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. That is, if a - * newer format version is required to support a feature - * in 1.16.x series, this setting will allow the object to be - * created. + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This setting allows users to take advantage of the latest * features and performance enhancements in the library. - * However, objects written with this setting may be - * accessible to a smaller range of library versions than - * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li Earlier versions of the library may not be able to access - * objects created with this setting. - * + * objects created with this setting. * * * \p low=#H5F_LIBVER_V116
- * \p high=#H5F_LIBVER_V116 - * + * \p high=#H5F_LIBVER_V116 * * \li The library will create objects with the latest format * versions available to library release 1.16.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.16.x. - * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. That is, if a - * newer format version is required to support a feature - * in 1.16.x series, this setting will allow the object to be - * created. + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This setting allows users to take advantage of the latest * features and performance enhancements in the library. * However, objects written with this setting may be * accessible to a smaller range of library versions than * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li Earlier versions of the library may not be able to access - * objects created with this setting. - * + * objects created with this setting. * * * + * \note *H5F_LIBVER_LATEST*:
+ * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be + * created. + * * \version 1.10.2 #H5F_LIBVER_V18 added to the enumerated defines in * #H5F_libver_t. * From e4fb67c84aa4a4983e2cee4994235600d813a9ee Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Sun, 22 Oct 2023 18:45:22 -0700 Subject: [PATCH 042/101] Clean Autotools files in parallel tests (#3744) Adds missing files to `make clean` for parallel, including Fortran. --- fortran/testpar/Makefile.am | 2 +- testpar/Makefile.am | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/fortran/testpar/Makefile.am b/fortran/testpar/Makefile.am index 7f9f2846928..afdda980c5c 100644 --- a/fortran/testpar/Makefile.am +++ b/fortran/testpar/Makefile.am @@ -36,7 +36,7 @@ TEST_PROG_PARA=parallel_test subfiling_test async_test check_PROGRAMS=$(TEST_PROG_PARA) # Temporary files -CHECK_CLEANFILES+=parf[12].h5 subf.h5* +CHECK_CLEANFILES+=parf[12].h5 h5*_tests.h5 subf.h5* test_async_apis.mod # Test source files parallel_test_SOURCES=ptest.F90 hyper.F90 mdset.F90 multidsetrw.F90 diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 59d47e15ebf..4a8cb826f49 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -58,6 +58,7 @@ LDADD = $(LIBH5TEST) $(LIBHDF5) # after_mpi_fin.h5 is from t_init_term # go is used for debugging. See testphdf5.c. CHECK_CLEANFILES+=MPItest.h5 Para*.h5 bigio_test.h5 CacheTestDummy.h5 \ - ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go + ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go noflush.h5 \ + mpio_select_test_file.h5 *.btr include $(top_srcdir)/config/conclude.am From 5a00539e0dbec38894fe2e7ef7cdbef6d09f8dc2 Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Mon, 23 Oct 2023 14:32:54 -0500 Subject: [PATCH 043/101] Add native VOL checks to deprecated functions (#3647) * Add native VOL checks to deprecated functions * Remove unneeded native VOL checks * Move native checks to top level calls --- src/H5Odeprec.c | 75 +++++++++++++++++++++++----- src/H5Rdeprec.c | 128 +++++++++++++++++++++++++++++++----------------- 2 files changed, 145 insertions(+), 58 deletions(-) diff --git a/src/H5Odeprec.c b/src/H5Odeprec.c index 6e8b34e789b..3de58185675 100644 --- a/src/H5Odeprec.c +++ b/src/H5Odeprec.c @@ -116,9 +116,10 @@ static herr_t H5O__iterate1_adapter(hid_t obj_id, const char *name, const H5O_info2_t *oinfo2, void *op_data) { H5O_visit1_adapter_t *shim_data = (H5O_visit1_adapter_t *)op_data; - H5O_info1_t oinfo; /* Deprecated object info struct */ - unsigned dm_fields; /* Fields for data model query */ - unsigned nat_fields; /* Fields for native query */ + H5O_info1_t oinfo; /* Deprecated object info struct */ + unsigned dm_fields; /* Fields for data model query */ + unsigned nat_fields; /* Fields for native query */ + H5VL_object_t *vol_obj; herr_t ret_value = H5_ITER_CONT; /* Return value */ FUNC_ENTER_PACKAGE @@ -158,7 +159,6 @@ H5O__iterate1_adapter(hid_t obj_id, const char *name, const H5O_info2_t *oinfo2, /* Check for retrieving native information */ nat_fields = shim_data->fields & (H5O_INFO_HDR | H5O_INFO_META_SIZE); if (nat_fields) { - H5VL_object_t *vol_obj; /* Object of obj_id */ H5VL_optional_args_t vol_cb_args; /* Arguments to VOL callback */ H5VL_native_object_optional_args_t obj_opt_args; /* 
Arguments for optional operation */ H5VL_loc_params_t loc_params; /* Location parameters for VOL callback */ @@ -401,7 +401,8 @@ H5Oget_info1(hid_t loc_id, H5O_info1_t *oinfo /*out*/) { H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ H5VL_loc_params_t loc_params; - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE2("e", "ix", loc_id, oinfo); @@ -418,6 +419,15 @@ H5Oget_info1(hid_t loc_id, H5O_info1_t *oinfo /*out*/) if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Oget_info1 is only meant to be used with the native VOL connector"); + /* Retrieve the object's information */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, H5O_INFO_ALL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't get deprecated info for object"); @@ -441,7 +451,8 @@ H5Oget_info_by_name1(hid_t loc_id, const char *name, H5O_info1_t *oinfo /*out*/, { H5VL_object_t *vol_obj = NULL; /* object of loc_id */ H5VL_loc_params_t loc_params; - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE4("e", "i*sxi", loc_id, name, oinfo, lapl_id); @@ -468,6 +479,15 @@ H5Oget_info_by_name1(hid_t loc_id, const char *name, H5O_info1_t *oinfo /*out*/, if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Oget_info_by_name1 is only meant to be used with the native VOL connector"); + /* Retrieve the object's information */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, H5O_INFO_ALL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't get deprecated info for object"); @@ -493,7 +513,8 @@ H5Oget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H { H5VL_object_t *vol_obj = NULL; /* object of loc_id */ H5VL_loc_params_t loc_params; - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE7("e", "i*sIiIohxi", loc_id, group_name, idx_type, order, n, oinfo, lapl_id); @@ -524,6 +545,15 @@ H5Oget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Oget_info_by_idx1 is only meant to be used 
with the native VOL connector"); + /* Retrieve the object's information */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, H5O_INFO_ALL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't get deprecated info for object"); @@ -574,7 +604,7 @@ H5Oget_info2(hid_t loc_id, H5O_info1_t *oinfo /*out*/, unsigned fields) "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Oget_info2 is only meant to be used with the native VOL connector"); + "Deprecated H5Oget_info2 is only meant to be used with the native VOL connector"); /* Retrieve deprecated info struct */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, fields) < 0) @@ -637,7 +667,7 @@ H5Oget_info_by_name2(hid_t loc_id, const char *name, H5O_info1_t *oinfo /*out*/, "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Oget_info_by_name2 is only meant to be used with the native VOL connector"); + "Deprecated H5Oget_info_by_name2 is only meant to be used with the native VOL connector"); /* Retrieve deprecated info struct */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, fields) < 0) @@ -706,7 +736,7 @@ H5Oget_info_by_idx2(hid_t loc_id, const char *group_name, H5_index_t idx_type, H "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Oget_info_by_idx2 is only meant to be used with the native VOL connector"); + "Deprecated H5Oget_info_by_idx2 is only meant to be used with the native VOL connector"); /* Retrieve deprecated info struct */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, fields) < 0) @@ -753,6 +783,7 @@ H5Ovisit1(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate1 H5VL_loc_params_t loc_params; /* Location parameters for object access */ H5O_visit1_adapter_t shim_data; /* Adapter for passing app callback & user data */ herr_t ret_value; /* Return value */ + bool is_native_vol_obj = false; FUNC_ENTER_API(FAIL) H5TRACE5("e", "iIiIoOi*x", obj_id, idx_type, order, op, op_data); @@ -769,6 +800,15 @@ H5Ovisit1(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate1 if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Ovisit1 is only meant to be used with the native VOL connector"); + /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_SELF; loc_params.obj_type = H5I_get_type(obj_id); @@ -833,6 +873,7 @@ H5Ovisit_by_name1(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_it H5VL_loc_params_t loc_params; /* Location parameters for object access */ H5O_visit1_adapter_t shim_data; /* Adapter for passing app callback & user data */ herr_t ret_value; /* Return value */ + bool is_native_vol_obj = false; FUNC_ENTER_API(FAIL) H5TRACE7("e", "i*sIiIoOi*xi", loc_id, obj_name, idx_type, order, op, op_data, lapl_id); @@ -857,6 +898,15 @@ H5Ovisit_by_name1(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_it if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, 
"invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Ovisit_by_name1 is only meant to be used with the native VOL connector"); + /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_NAME; loc_params.loc_data.loc_by_name.name = obj_name; @@ -949,9 +999,10 @@ H5Ovisit2(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate1 if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5I_INVALID_HID, "can't determine if VOL object is native connector object"); + if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Ovisit2 is only meant to be used with the native VOL connector"); + "Deprecated H5Ovisit2 is only meant to be used with the native VOL connector"); /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_SELF; @@ -1053,7 +1104,7 @@ H5Ovisit_by_name2(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_it "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Ovisit_by_name2 is only meant to be used with the native VOL connector"); + "Deprecated H5Ovisit_by_name2 is only meant to be used with the native VOL connector"); /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_NAME; diff --git a/src/H5Rdeprec.c b/src/H5Rdeprec.c index 773d8b0c706..1d12ebae25a 100644 --- a/src/H5Rdeprec.c +++ b/src/H5Rdeprec.c @@ -101,14 +101,14 @@ H5R__decode_token_compat(H5VL_object_t *vol_obj, H5I_type_t type, H5R_type_t ref #ifndef NDEBUG { - bool is_native = false; /* Whether the src file is using the native VOL connector */ + bool is_native_vol_obj = false; /* Whether the src file is using the native VOL connector */ /* Check if using native VOL connector */ - if (H5VL_object_is_native(vol_obj, &is_native) < 0) + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, "can't query if file uses native VOL connector"); /* Must use native VOL connector for this operation */ - assert(is_native); + assert(is_native_vol_obj); } #endif /* NDEBUG */ @@ -251,7 +251,8 @@ H5Rget_obj_type1(hid_t id, H5R_type_t ref_type, const void *ref) H5O_token_t obj_token = {0}; /* Object token */ const unsigned char *buf = (const unsigned char *)ref; /* Reference buffer */ H5O_type_t obj_type = H5O_TYPE_UNKNOWN; /* Type of the referenced object */ - H5G_obj_t ret_value; /* Return value */ + bool is_native_vol_obj; /* Whether the native VOL connector is in use */ + H5G_obj_t ret_value; /* Return value */ FUNC_ENTER_API(H5G_UNKNOWN) H5TRACE3("Go", "iRt*x", id, ref_type, ref); @@ -266,6 +267,16 @@ H5Rget_obj_type1(hid_t id, H5R_type_t ref_type, const void *ref) if (NULL == (vol_obj = H5VL_vol_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5G_UNKNOWN, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rget_obj_type1 is only meant to 
be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5G_UNKNOWN, "invalid location identifier"); @@ -315,7 +326,8 @@ H5Rdereference1(hid_t obj_id, H5R_type_t ref_type, const void *ref) H5I_type_t opened_type; /* Opened object type */ void *opened_obj = NULL; /* Opened object */ const unsigned char *buf = (const unsigned char *)ref; /* Reference buffer */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + bool is_native_vol_obj; /* Whether the native VOL connector is in use */ + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) H5TRACE3("i", "iRt*x", obj_id, ref_type, ref); @@ -330,6 +342,16 @@ H5Rdereference1(hid_t obj_id, H5R_type_t ref_type, const void *ref) if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rdereference1 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(obj_id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); @@ -382,8 +404,9 @@ H5Rcreate(void *ref, hid_t loc_id, const char *name, H5R_type_t ref_type, hid_t H5VL_file_get_args_t file_get_vol_cb_args; /* Arguments to VOL callback */ hid_t file_id = H5I_INVALID_HID; /* File ID for region reference */ void *vol_obj_file = NULL; - unsigned char *buf = (unsigned char *)ref; /* Return reference pointer */ - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; /* Whether the src file is using the native VOL connector */ + unsigned char *buf = (unsigned char *)ref; /* Return reference pointer */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE5("e", "*xi*sRti", ref, loc_id, name, ref_type, space_id); @@ -404,18 +427,13 @@ H5Rcreate(void *ref, hid_t loc_id, const char *name, H5R_type_t ref_type, hid_t if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); -#ifndef NDEBUG - { - bool is_native = false; /* Whether the src file is using the native VOL connector */ + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, "can't query if file uses native VOL connector"); - /* Check if using native VOL connector */ - if (H5VL_object_is_native(vol_obj, &is_native) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, "can't query if file uses native VOL connector"); - - /* Must use native VOL connector for this operation */ - assert(is_native); - } -#endif /* NDEBUG */ + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, "must use native VOL connector to create reference"); /* Get object type */ if ((vol_obj_type = H5I_get_type(loc_id)) < 0) @@ -500,13 +518,14 @@ H5Rcreate(void *ref, hid_t loc_id, const char *name, H5R_type_t ref_type, hid_t herr_t H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H5O_type_t *obj_type /*out*/) { - H5VL_object_t *vol_obj = NULL; /* Object of 
loc_id */ - H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ - H5VL_object_get_args_t vol_cb_args; /* Arguments to VOL callback */ - H5VL_loc_params_t loc_params; /* Location parameters */ - H5O_token_t obj_token = {0}; /* Object token */ - const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ + H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ + H5VL_object_get_args_t vol_cb_args; /* Arguments to VOL callback */ + H5VL_loc_params_t loc_params; /* Location parameters */ + H5O_token_t obj_token = {0}; /* Object token */ + const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ + bool is_native_vol_obj = false; /* Whether the native VOL connector is in use */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE4("e", "iRt*xx", id, ref_type, ref, obj_type); @@ -521,6 +540,16 @@ H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H5O_type_t *obj if (NULL == (vol_obj = H5VL_vol_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rget_obj_type2 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); @@ -560,14 +589,15 @@ H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H5O_type_t *obj hid_t H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *ref) { - H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ - H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ - H5VL_loc_params_t loc_params; /* Location parameters */ - H5O_token_t obj_token = {0}; /* Object token */ - H5I_type_t opened_type; /* Opened object type */ - void *opened_obj = NULL; /* Opened object */ - const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ + H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ + H5VL_loc_params_t loc_params; /* Location parameters */ + H5O_token_t obj_token = {0}; /* Object token */ + H5I_type_t opened_type; /* Opened object type */ + void *opened_obj = NULL; /* Opened object */ + const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ + bool is_native_vol_obj = false; /* Whether the native VOL connector is in use */ + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) H5TRACE4("i", "iiRt*x", obj_id, oapl_id, ref_type, ref); @@ -588,6 +618,16 @@ H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *re if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this 
operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rdereference2 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(obj_id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); @@ -639,7 +679,8 @@ H5Rget_region(hid_t id, H5R_type_t ref_type, const void *ref) H5S_t *space = NULL; /* Dataspace object */ hid_t file_id = H5I_INVALID_HID; /* File ID for region reference */ const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ - hid_t ret_value; /* Return value */ + bool is_native_vol_obj = false; /* Whether the src file is using the native VOL connector */ + hid_t ret_value; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) H5TRACE3("i", "iRt*x", id, ref_type, ref); @@ -654,19 +695,14 @@ H5Rget_region(hid_t id, H5R_type_t ref_type, const void *ref) if (NULL == (vol_obj = H5VL_vol_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); -#ifndef NDEBUG - { - bool is_native = false; /* Whether the src file is using the native VOL connector */ - - /* Check if using native VOL connector */ - if (H5VL_object_is_native(vol_obj, &is_native) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5I_INVALID_HID, - "can't query if file uses native VOL connector"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5I_INVALID_HID, + "can't query if file uses native VOL connector"); - /* Must use native VOL connector for this operation */ - assert(is_native); - } -#endif /* NDEBUG */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rget_region is only meant to be used with the native VOL connector"); /* Get object type */ if ((vol_obj_type = H5I_get_type(id)) < 0) From a4c318de285e60d58a71ac5c3d7ee146f7e43824 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 23 Oct 2023 14:33:37 -0500 Subject: [PATCH 044/101] Fix buffer overflow in cache debugging code (#3691) --- src/H5Cmpio.c | 104 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 84 insertions(+), 20 deletions(-) diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index d7bf5b1dbda..643bbc80207 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -169,7 +169,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha haddr_t last_addr; #endif /* H5C_DO_SANITY_CHECKS */ #if H5C_APPLY_CANDIDATE_LIST__DEBUG - char tbl_buf[1024]; + char *tbl_buf = NULL; #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ unsigned m, n; unsigned u; /* Local index variable */ @@ -190,16 +190,48 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha memset(entries_to_clear, 0, sizeof(entries_to_clear)); #if H5C_APPLY_CANDIDATE_LIST__DEBUG - fprintf(stdout, "%s:%d: setting up candidate assignment table.\n", __func__, mpi_rank); + { + const char *const table_header = "candidate list = "; + size_t tbl_buf_size; + size_t tbl_buf_left; + size_t entry_nchars; + int bytes_printed; - memset(tbl_buf, 0, sizeof(tbl_buf)); + fprintf(stdout, "%s:%d: setting up candidate assignment table.\n", __func__, mpi_rank); - snprintf(tbl_buf, sizeof(tbl_buf), "candidate list = "); - for (u = 0; u < num_candidates; u++) - sprintf(&(tbl_buf[strlen(tbl_buf)]), " 0x%llx", (long long)(*(candidates_list_ptr + u))); - sprintf(&(tbl_buf[strlen(tbl_buf)]), "\n"); + /* Calculate maximum number of characters printed for each + * candidate 
entry, including the leading space and "0x" + */ + entry_nchars = (sizeof(long long) * CHAR_BIT / 4) + 3; + + tbl_buf_size = strlen(table_header) + (num_candidates * entry_nchars) + 1; + if (NULL == (tbl_buf = H5MM_malloc(tbl_buf_size))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate debug buffer"); + tbl_buf_left = tbl_buf_size; + + if ((bytes_printed = snprintf(tbl_buf, tbl_buf_left, table_header)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + + for (u = 0; u < num_candidates; u++) { + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, " 0x%llx", + (long long)(*(candidates_list_ptr + u)))) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + } + + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, "\n")) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed + 1; /* NUL terminator */ - fprintf(stdout, "%s", tbl_buf); + fprintf(stdout, "%s", tbl_buf); + + H5MM_free(tbl_buf); + tbl_buf = NULL; + } #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ if (f->shared->coll_md_write) { @@ -258,18 +290,50 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha last_entry_to_flush = candidate_assignment_table[mpi_rank + 1] - 1; #if H5C_APPLY_CANDIDATE_LIST__DEBUG - for (u = 0; u < 1024; u++) - tbl_buf[u] = '\0'; - snprintf(tbl_buf, sizeof(tbl_buf), "candidate assignment table = "); - for (u = 0; u <= (unsigned)mpi_size; u++) - sprintf(&(tbl_buf[strlen(tbl_buf)]), " %u", candidate_assignment_table[u]); - sprintf(&(tbl_buf[strlen(tbl_buf)]), "\n"); - fprintf(stdout, "%s", tbl_buf); - - fprintf(stdout, "%s:%d: flush entries [%u, %u].\n", __func__, mpi_rank, first_entry_to_flush, - last_entry_to_flush); - - fprintf(stdout, "%s:%d: marking entries.\n", __func__, mpi_rank); + { + const char *const table_header = "candidate assignment table = "; + unsigned umax = UINT_MAX; + size_t tbl_buf_size; + size_t tbl_buf_left; + size_t entry_nchars; + int bytes_printed; + + /* Calculate the maximum number of characters printed for each entry */ + entry_nchars = (size_t)(log10(umax) + 1) + 1; + + tbl_buf_size = strlen(table_header) + ((size_t)mpi_size * entry_nchars) + 1; + if (NULL == (tbl_buf = H5MM_malloc(tbl_buf_size))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate debug buffer"); + tbl_buf_left = tbl_buf_size; + + if ((bytes_printed = snprintf(tbl_buf, tbl_buf_left, table_header)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + + for (u = 0; u <= (unsigned)mpi_size; u++) { + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, " %u", + candidate_assignment_table[u])) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + } + + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, "\n")) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left 
-= (size_t)bytes_printed + 1; /* NUL terminator */ + + fprintf(stdout, "%s", tbl_buf); + + H5MM_free(tbl_buf); + tbl_buf = NULL; + + fprintf(stdout, "%s:%d: flush entries [%u, %u].\n", __func__, mpi_rank, first_entry_to_flush, + last_entry_to_flush); + + fprintf(stdout, "%s:%d: marking entries.\n", __func__, mpi_rank); + } #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ for (u = 0; u < num_candidates; u++) { From 34f4569cdd65c51476fe4d295b87de7a9c90f184 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 23 Oct 2023 14:35:42 -0500 Subject: [PATCH 045/101] update stat arg for apple (#3726) * update stat arg for apple * use H5_HAVE_DARWIN for Apple ifdef * fix typo * removed duplicate H5_ih_info_t * added fortran async test to cmake --- fortran/src/H5Fff.F90 | 6 ------ fortran/src/H5Off.F90 | 6 ------ fortran/src/H5config_f.inc.cmake | 8 +++++++- fortran/src/H5config_f.inc.in | 3 +++ fortran/src/H5f90global.F90 | 6 ++++++ fortran/testpar/CMakeTests.cmake | 1 + fortran/testpar/subfiling.F90 | 11 ++++++++--- 7 files changed, 25 insertions(+), 16 deletions(-) diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90 index fee4d3c7cf8..d31117784e5 100644 --- a/fortran/src/H5Fff.F90 +++ b/fortran/src/H5Fff.F90 @@ -72,12 +72,6 @@ END FUNCTION h5fis_accessible INTEGER(HSIZE_T) :: tot_space !< Amount of free space in the file END TYPE H5F_info_free_t -!> @brief H5_ih_info_t derived type. - TYPE, BIND(C) :: H5_ih_info_t - INTEGER(HSIZE_T) :: index_size !< btree and/or list - INTEGER(HSIZE_T) :: heap_size !< Heap size - END TYPE H5_ih_info_t - !> @brief H5F_info_t_sohm derived type. TYPE, BIND(C) :: H5F_info_sohm_t INTEGER(C_INT) :: version !< Version # of shared object header info diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 4a0a1632e78..b705ba324d7 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -110,12 +110,6 @@ MODULE H5O TYPE(mesg_t) :: mesg END TYPE c_hdr_t -!> @brief Extra metadata storage for obj & attributes - TYPE, BIND(C) :: H5_ih_info_t - INTEGER(hsize_t) :: index_size !< btree and/or list - INTEGER(hsize_t) :: heap_size !< heap - END TYPE H5_ih_info_t - !> @brief meta_size_t derived type TYPE, BIND(C) :: meta_size_t TYPE(H5_ih_info_t) :: obj !< v1/v2 B-tree & local/fractal heap for groups, B-tree for chunked datasets diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake index 34fb091c787..71bce0e18c2 100644 --- a/fortran/src/H5config_f.inc.cmake +++ b/fortran/src/H5config_f.inc.cmake @@ -23,6 +23,12 @@ #undef H5_HAVE_SUBFILING_VFD #endif +! Define if on APPLE +#cmakedefine01 H5_HAVE_DARWIN +#if H5_HAVE_DARWIN == 0 +#undef H5_HAVE_DARWIN +#endif + ! Define if the intrinsic function STORAGE_SIZE exists #define H5_FORTRAN_HAVE_STORAGE_SIZE @H5_FORTRAN_HAVE_STORAGE_SIZE@ @@ -81,4 +87,4 @@ #cmakedefine01 H5_NO_DEPRECATED_SYMBOLS #if H5_NO_DEPRECATED_SYMBOLS == 0 #undef H5_NO_DEPRECATED_SYMBOLS -#endif \ No newline at end of file +#endif diff --git a/fortran/src/H5config_f.inc.in b/fortran/src/H5config_f.inc.in index 7fb76e12449..991e4b0750b 100644 --- a/fortran/src/H5config_f.inc.in +++ b/fortran/src/H5config_f.inc.in @@ -20,6 +20,9 @@ ! Define if we have subfiling support #undef HAVE_SUBFILING_VFD +! Define if on APPLE +#undef HAVE_DARWIN + ! 
Define if the intrinsic function STORAGE_SIZE exists #undef FORTRAN_HAVE_STORAGE_SIZE diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 index e60f1e83320..f6c06cbc056 100644 --- a/fortran/src/H5f90global.F90 +++ b/fortran/src/H5f90global.F90 @@ -25,6 +25,12 @@ MODULE H5GLOBAL IMPLICIT NONE +!> @brief H5_ih_info_t derived type. + TYPE, BIND(C) :: H5_ih_info_t + INTEGER(HSIZE_T) :: index_size !< btree and/or list + INTEGER(HSIZE_T) :: heap_size !< Heap size + END TYPE H5_ih_info_t + !> \addtogroup FH5 !> @{ ! Parameters used in the function 'h5kind_to_type' located in H5_ff.F90. diff --git a/fortran/testpar/CMakeTests.cmake b/fortran/testpar/CMakeTests.cmake index 8c157241500..473049fb976 100644 --- a/fortran/testpar/CMakeTests.cmake +++ b/fortran/testpar/CMakeTests.cmake @@ -17,3 +17,4 @@ ############################################################################## add_test (NAME MPI_TEST_FORT_parallel_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS}) add_test (NAME MPI_TEST_FORT_subfiling_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS}) +add_test (NAME MPI_TEST_FORT_async_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS}) diff --git a/fortran/testpar/subfiling.F90 b/fortran/testpar/subfiling.F90 index 043ac6cb771..a677bea6121 100644 --- a/fortran/testpar/subfiling.F90 +++ b/fortran/testpar/subfiling.F90 @@ -54,6 +54,7 @@ PROGRAM subfiling_test INTEGER(HID_T) :: driver_id CHARACTER(len=8) :: hex1, hex2 + CHARACTER(len=1) :: arg ! ! initialize MPI @@ -336,10 +337,14 @@ PROGRAM subfiling_test WRITE(*,"(A,A)") "Failed to find the stub subfile ",TRIM(filename) nerrors = nerrors + 1 ENDIF - - CALL EXECUTE_COMMAND_LINE("stat --format='%i' "//filename//" >> tmp_inode", EXITSTAT=i) +#ifdef H5_HAVE_DARWIN + arg(1:1)="f" +#else + arg(1:1)="c" +#endif + CALL EXECUTE_COMMAND_LINE("stat -"//arg(1:1)//" %i "//filename//" >> tmp_inode", EXITSTAT=i) IF(i.ne.0)THEN - WRITE(*,"(A,A)") "Failed to stat the stub subfile ",TRIM(filename) + WRITE(*,"(A,A)") "Failed to stat the stub subfile ",TRIM(filename) nerrors = nerrors + 1 ENDIF From 66396cadb14faf892907b1b7fd2f21854b7655f4 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 23 Oct 2023 14:39:43 -0500 Subject: [PATCH 046/101] Fix windows cpack error in WiX package. (#3747) --- doxygen/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doxygen/CMakeLists.txt b/doxygen/CMakeLists.txt index 8fe3b771103..7dd7660621d 100644 --- a/doxygen/CMakeLists.txt +++ b/doxygen/CMakeLists.txt @@ -39,7 +39,7 @@ if (DOXYGEN_FOUND) install ( DIRECTORY ${HDF5_BINARY_DIR}/hdf5lib_docs/html DESTINATION ${HDF5_INSTALL_DOC_DIR} - COMPONENT Documents + COMPONENT hdfdocuments ) if (NOT TARGET doxygen) From d76d591555c57f95460e23a12baff66c6150a124 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:45:41 -0700 Subject: [PATCH 047/101] Add a simple cache to the ros3 VFD (#3753) Adds a small cache of the first N bytes of a file opened with the read-only S3 (ros3) VFD, where N is 4kiB or the size of the file, whichever is smaller. This avoids a lot of small I/O operations on file open. 
Addresses GitHub issue #3381 --- release_docs/RELEASE.txt | 10 ++++++ src/H5FDros3.c | 73 ++++++++++++++++++++++++++++------------ 2 files changed, 61 insertions(+), 22 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 0239a9e356f..19c7831862a 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -228,6 +228,16 @@ New Features Library: -------- + - Added a simple cache to the read-only S3 VFD + + The read-only S3 VFD now caches the first N bytes of a file stored + in S3 to avoid a lot of small I/O operations when opening files. + This cache is per-file and created when the file is opened. + + N is currently 4kiB or the size of the file, whichever is smaller. + + Addresses GitHub issue #3381 + - Added new API function H5Pget_actual_selection_io_mode() This function allows the user to determine if the library performed diff --git a/src/H5FDros3.c b/src/H5FDros3.c index 3f3413c6d0e..f704eff1efa 100644 --- a/src/H5FDros3.c +++ b/src/H5FDros3.c @@ -43,6 +43,9 @@ */ #define ROS3_STATS 0 +/* Max size of the cache, in bytes */ +#define ROS3_MAX_CACHE_SIZE 4096 + /* The driver identification number, initialized at runtime */ static hid_t H5FD_ROS3_g = 0; @@ -189,6 +192,8 @@ typedef struct H5FD_ros3_t { H5FD_ros3_fapl_t fa; haddr_t eoa; s3r_t *s3r_handle; + uint8_t *cache; + size_t cache_size; #if ROS3_STATS ros3_statsbin meta[ROS3_STATS_BIN_COUNT + 1]; ros3_statsbin raw[ROS3_STATS_BIN_COUNT + 1]; @@ -1000,6 +1005,18 @@ H5FD__ros3_open(const char *url, unsigned flags, hid_t fapl_id, haddr_t maxaddr) HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL, "unable to reset file statistics"); #endif /* ROS3_STATS */ + /* Cache the initial bytes of the file */ + { + size_t filesize = H5FD_s3comms_s3r_get_filesize(file->s3r_handle); + + file->cache_size = (filesize < ROS3_MAX_CACHE_SIZE) ? 
filesize : ROS3_MAX_CACHE_SIZE; + + if (NULL == (file->cache = (uint8_t *)H5MM_calloc(file->cache_size))) + HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, NULL, "unable to allocate cache memory"); + if (H5FD_s3comms_s3r_read(file->s3r_handle, 0, file->cache_size, file->cache) == FAIL) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, NULL, "unable to execute read"); + } + ret_value = (H5FD_t *)file; done: @@ -1007,8 +1024,10 @@ H5FD__ros3_open(const char *url, unsigned flags, hid_t fapl_id, haddr_t maxaddr) if (handle != NULL) if (FAIL == H5FD_s3comms_s3r_close(handle)) HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, "unable to close s3 file handle"); - if (file != NULL) + if (file != NULL) { + H5MM_xfree(file->cache); file = H5FL_FREE(H5FD_ros3_t, file); + } curl_global_cleanup(); /* early cleanup because open failed */ } /* end if null return value (error) */ @@ -1335,6 +1354,7 @@ H5FD__ros3_close(H5FD_t H5_ATTR_UNUSED *_file) #endif /* ROS3_STATS */ /* Release the file info */ + H5MM_xfree(file->cache); file = H5FL_FREE(H5FD_ros3_t, file); done: @@ -1666,41 +1686,50 @@ H5FD__ros3_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU fprintf(stdout, "H5FD__ros3_read() called.\n"); #endif - assert(file != NULL); - assert(file->s3r_handle != NULL); - assert(buf != NULL); + assert(file); + assert(file->cache); + assert(file->s3r_handle); + assert(buf); filesize = H5FD_s3comms_s3r_get_filesize(file->s3r_handle); if ((addr > filesize) || ((addr + size) > filesize)) HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "range exceeds file address"); - if (H5FD_s3comms_s3r_read(file->s3r_handle, addr, size, buf) == FAIL) - HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "unable to execute read"); + /* Copy from the cache when accessing the first N bytes of the file. + * Saves network I/O operations when opening files. + */ + if (addr + size < file->cache_size) { + memcpy(buf, file->cache + addr, size); + } + else { + if (H5FD_s3comms_s3r_read(file->s3r_handle, addr, size, buf) == FAIL) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "unable to execute read"); #if ROS3_STATS - /* Find which "bin" this read fits in. Can be "overflow" bin. */ - for (bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) - if ((unsigned long long)size < ros3_stats_boundaries[bin_i]) - break; - bin = (type == H5FD_MEM_DRAW) ? &file->raw[bin_i] : &file->meta[bin_i]; + /* Find which "bin" this read fits in. Can be "overflow" bin. */ + for (bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) + if ((unsigned long long)size < ros3_stats_boundaries[bin_i]) + break; + bin = (type == H5FD_MEM_DRAW) ? 
&file->raw[bin_i] : &file->meta[bin_i]; - /* Store collected stats in appropriate bin */ - if (bin->count == 0) { - bin->min = size; - bin->max = size; - } - else { - if (size < bin->min) + /* Store collected stats in appropriate bin */ + if (bin->count == 0) { bin->min = size; - if (size > bin->max) bin->max = size; - } - bin->count++; - bin->bytes += (unsigned long long)size; + } + else { + if (size < bin->min) + bin->min = size; + if (size > bin->max) + bin->max = size; + } + bin->count++; + bin->bytes += (unsigned long long)size; #endif /* ROS3_STATS */ + } done: FUNC_LEAVE_NOAPI(ret_value) From 744a20fc21be7437bb2449e6b6165702703a1349 Mon Sep 17 00:00:00 2001 From: bmribler <39579120+bmribler@users.noreply.github.com> Date: Mon, 23 Oct 2023 15:46:56 -0400 Subject: [PATCH 048/101] Add helpful text to h5clear (#3754) Added text to the usage of h5clear to explain that this tool is not for fixing corrupted files but simply for helping in the inspection of the damage.. Fixed expected output for testing the modified usage. --- tools/src/misc/h5clear.c | 11 +++++++++-- tools/test/misc/expected/h5clear_missing_file.ddl | 11 +++++++++-- tools/test/misc/expected/h5clear_usage.ddl | 11 +++++++++-- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/tools/src/misc/h5clear.c b/tools/src/misc/h5clear.c index 31f7c1744f8..ea3e07249b1 100644 --- a/tools/src/misc/h5clear.c +++ b/tools/src/misc/h5clear.c @@ -61,6 +61,13 @@ static struct h5_long_options l_opts[] = { static void usage(const char *prog) { + fprintf(stdout, "h5clear clears superblock status flag field, removes metadata cache image, prints\n"); + fprintf(stdout, "EOA and EOF, or sets EOA of a file. It is not a general repair tool and should not\n"); + fprintf(stdout, "be used to fix file corruption. If a process doesn't shut down cleanly, the\n"); + fprintf(stdout, "superblock mark can be left that prevents opening a file without SWMR. Then,\n"); + fprintf(stdout, "h5clear can be used to remove this superblock mark so that the file can be inspected\n"); + fprintf(stdout, "and appropriate actions can be taken.\n"); + fprintf(stdout, "\n"); fprintf(stdout, "usage: %s [OPTIONS] file_name\n", prog); fprintf(stdout, " OPTIONS\n"); fprintf(stdout, " -h, --help Print a usage message and exit\n"); @@ -73,8 +80,8 @@ usage(const char *prog) fprintf(stdout, " C is >= 0; C is optional and will default to 1M when not set.\n"); fprintf(stdout, - " This option helps to repair a crashed file where the stored EOA\n"); - fprintf(stdout, " in the superblock is different from the actual EOF.\n"); + " This option helps to repair a crashed SWMR file when the stored\n"); + fprintf(stdout, " EOA in the superblock is different from the actual EOF.\n"); fprintf(stdout, " The file's EOA and EOF will be the same after applying\n"); fprintf(stdout, " this option to the file.\n"); fprintf(stdout, "\n"); diff --git a/tools/test/misc/expected/h5clear_missing_file.ddl b/tools/test/misc/expected/h5clear_missing_file.ddl index c7a21189ea7..fe659af9593 100644 --- a/tools/test/misc/expected/h5clear_missing_file.ddl +++ b/tools/test/misc/expected/h5clear_missing_file.ddl @@ -1,3 +1,10 @@ +h5clear clears superblock status flag field, removes metadata cache image, prints +EOA and EOF, or sets EOA of a file. It is not a general repair tool and should not +be used to fix file corruption. If a process doesn't shut down cleanly, the +superblock mark can be left that prevents opening a file without SWMR. 
Then, +h5clear can be used to remove this superblock mark so that the file can be inspected +and appropriate actions can be taken. + usage: h5clear [OPTIONS] file_name OPTIONS -h, --help Print a usage message and exit @@ -8,8 +15,8 @@ usage: h5clear [OPTIONS] file_name --increment=C Set the file's EOA to the maximum of (EOA, EOF) + C for the file . C is >= 0; C is optional and will default to 1M when not set. - This option helps to repair a crashed file where the stored EOA - in the superblock is different from the actual EOF. + This option helps to repair a crashed SWMR file when the stored + EOA in the superblock is different from the actual EOF. The file's EOA and EOF will be the same after applying this option to the file. diff --git a/tools/test/misc/expected/h5clear_usage.ddl b/tools/test/misc/expected/h5clear_usage.ddl index c7a21189ea7..fe659af9593 100644 --- a/tools/test/misc/expected/h5clear_usage.ddl +++ b/tools/test/misc/expected/h5clear_usage.ddl @@ -1,3 +1,10 @@ +h5clear clears superblock status flag field, removes metadata cache image, prints +EOA and EOF, or sets EOA of a file. It is not a general repair tool and should not +be used to fix file corruption. If a process doesn't shut down cleanly, the +superblock mark can be left that prevents opening a file without SWMR. Then, +h5clear can be used to remove this superblock mark so that the file can be inspected +and appropriate actions can be taken. + usage: h5clear [OPTIONS] file_name OPTIONS -h, --help Print a usage message and exit @@ -8,8 +15,8 @@ usage: h5clear [OPTIONS] file_name --increment=C Set the file's EOA to the maximum of (EOA, EOF) + C for the file . C is >= 0; C is optional and will default to 1M when not set. - This option helps to repair a crashed file where the stored EOA - in the superblock is different from the actual EOF. + This option helps to repair a crashed SWMR file when the stored + EOA in the superblock is different from the actual EOF. The file's EOA and EOF will be the same after applying this option to the file. From a6d1bda6d2da6121a2d8f66cd7a1e0d2cafe2da6 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:50:15 -0700 Subject: [PATCH 049/101] Update Autotools to correctly configure oneAPI (#3751) * Update Autotools to correctly configure oneAPI Splits the Intel config files under the Autotools into 'classic' Intel and oneAPI versions, fixing 'unsupported option' messages. Also turns off `-check uninit` (new in 2023) in Fortran, which kills the H5_buildiface program due to false positives. 
* Enable Fortran in oneAPI CI workflow * Turn on Fortran in CMake, update LD_LIBRARY_PATH * Go back to disabling Fortran w/ Intel For some reason there's a linking problem w/ Fortran error while loading shared libraries: libifport.so.5: cannot open shared object file: No such file or directory --- config/apple | 42 +++++++---- config/freebsd | 16 +++- config/linux-gnulibc1 | 33 ++++++++- config/netbsd | 13 +++- config/oneapi-cxxflags | 155 +++++++++++++++++++++++++++++++++++++++ config/oneapi-fflags | 145 ++++++++++++++++++++++++++++++++++++ config/oneapi-flags | 151 ++++++++++++++++++++++++++++++++++++++ release_docs/RELEASE.txt | 8 ++ 8 files changed, 539 insertions(+), 24 deletions(-) create mode 100644 config/oneapi-cxxflags create mode 100644 config/oneapi-fflags create mode 100644 config/oneapi-flags diff --git a/config/apple b/config/apple index a8a219b6798..39ed454a11f 100644 --- a/config/apple +++ b/config/apple @@ -55,30 +55,19 @@ fi # Figure out C compiler flags . $srcdir/config/gnu-flags . $srcdir/config/clang-flags +. $srcdir/config/oneapi-flags . $srcdir/config/intel-flags -# temp patch: if GCC 4.2.1 is used in Lion or Mountain Lion systems, do not -# use -O option as it causes failures in test/dt_arith. -case "$host_os" in - darwin1[12].*) # lion & mountain lion - #echo cc_vendor=$cc_vendor'-'cc_version=$cc_version - case "$cc_vendor-$cc_version" in - gcc-4.2.1) - # Remove any -O flags - #echo PROD_CFLAGS=$PROD_CFLAGS - PROD_CFLAGS="`echo $PROD_CFLAGS | sed -e 's/-O[0-3]*//'`" - #echo new PROD_CFLAGS=$PROD_CFLAGS - ;; - esac - ;; -esac - if test "X-" = "X-$FC"; then case $CC_BASENAME in gcc*) FC=gfortran FC_BASENAME=gfortran ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -97,6 +86,7 @@ fi # Figure out FORTRAN compiler flags . $srcdir/config/gnu-fflags +. $srcdir/config/oneapi-fflags . $srcdir/config/intel-fflags @@ -107,6 +97,10 @@ if test "X-" = "X-$CXX"; then CXX=g++ CXX_BASENAME=g++ ;; + icx) + CXX=icpx + CXX_BASENAME=icpx + ;; icc) CXX=icpc CXX_BASENAME=icpc @@ -123,6 +117,7 @@ if test "X-" = "X-$CXX"; then fi # Figure out C++ compiler flags +. $srcdir/config/oneapi-cxxflags . $srcdir/config/intel-cxxflags # Do this ahead of GNU to avoid icpc being detected as g++ . $srcdir/config/gnu-cxxflags . $srcdir/config/clang-cxxflags @@ -139,6 +134,11 @@ case $CC in grep 'GCC' | sed 's/.*\((GCC) [-a-z0-9\. ]*.*\)/\1/'` ;; + *icx*) + cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *icc*) cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -156,6 +156,11 @@ case $FC in grep 'GCC' | sed 's/\(.*(GCC) [-a-z0-9\. ]*\).*/\1/'` ;; + *ifx*) + fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *ifc*|*ifort*) fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -179,6 +184,11 @@ case $CXX in grep 'GCC' | sed 's/.*\((GCC) [-a-z0-9\. 
]*.*\)/\1/'` ;; + *icpx*) + cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *icpc*) cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` diff --git a/config/freebsd b/config/freebsd index 2fb962fe308..b0e825a9e26 100644 --- a/config/freebsd +++ b/config/freebsd @@ -29,7 +29,10 @@ fi # Figure out GNU C compiler flags . $srcdir/config/gnu-flags -# Figure out Intel C compiler flags +# Figure out Intel oneAPI C compiler flags +. $srcdir/config/oneapi-flags + +# Figure out Intel classic C compiler flags . $srcdir/config/intel-flags # The default Fortran 90 compiler @@ -43,6 +46,10 @@ if test "X-" = "X-$FC"; then FC=gfortran FC_BASENAME=gfortran ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -57,8 +64,11 @@ fi # Figure out FORTRAN compiler flags . $srcdir/config/gnu-fflags -# Figure out Intel F90 compiler flags -. $srcdir/config/intel-fflags +# Figure out Intel oneAPI FC compiler flags +. $srcdir/config/oneapi-fflags + +# Figure out Intel classic FC compiler flags +. $srcdir/config/classic-fflags # The default C++ compiler diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 index 7614b07852f..328f8d3cec3 100644 --- a/config/linux-gnulibc1 +++ b/config/linux-gnulibc1 @@ -38,7 +38,10 @@ fi # Figure out CCE C compiler flags . $srcdir/config/cce-flags -# Figure out Intel C compiler flags +# Figure out Intel oneAPI C compiler flags +. $srcdir/config/oneapi-flags + +# Figure out Intel classic C compiler flags . $srcdir/config/intel-flags # Figure out Clang C compiler flags @@ -55,6 +58,10 @@ if test "X-" = "X-$FC"; then FC=pgf90 FC_BASENAME=pgf90 ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -119,7 +126,10 @@ fi # Figure out CCE FC compiler flags . $srcdir/config/cce-fflags -# Figure out Intel FC compiler flags +# Figure out Intel oneAPI FC compiler flags +. $srcdir/config/oneapi-fflags + +# Figure out Intel classic FC compiler flags . $srcdir/config/intel-fflags # Figure out Clang FC compiler flags @@ -200,7 +210,10 @@ if test -z "$CXX"; then CXX_BASENAME=g++ fi -# Figure out Intel CXX compiler flags +# Figure out Intel oneAPI CXX compiler flags +. $srcdir/config/oneapi-cxxflags + +# Figure out Intel classic CXX compiler flags # Do this ahead of GNU to avoid icpc being detected as g++ . $srcdir/config/intel-cxxflags @@ -237,6 +250,11 @@ case $CC in cc_version_info=`echo $cc_version_info` ;; + *icx*) + cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *icc*) cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -271,6 +289,11 @@ case $FC in fc_version_info=`echo $fc_version_info` ;; + *ifx*) + fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *ifc*|*ifort*) fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -333,6 +356,10 @@ case $CXX in cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS --version 2>&1 |\ grep 'GCC' | sed 's/\(.*(GCC) [-a-z0-9\. 
]*\).*/\1/'` ;; + *icpx*) + cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; *icpc*) cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` diff --git a/config/netbsd b/config/netbsd index 04761f294a8..0ed84f7b3d2 100644 --- a/config/netbsd +++ b/config/netbsd @@ -26,7 +26,10 @@ fi # Figure out C compiler flags . $srcdir/config/gnu-flags -# Figure out Intel C compiler flags +# Figure out Intel oneAPI C compiler flags +. $srcdir/config/oneapi-flags + +# Figure out Intel classic C compiler flags . $srcdir/config/intel-flags # The default Fortran 90 compiler @@ -36,6 +39,10 @@ if test "X-" = "X-$FC"; then FC=gfortran FC_BASENAME=gfortran ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -50,6 +57,8 @@ fi # Figure out FORTRAN compiler flags . $srcdir/config/gnu-fflags -# Figure out Intel F90 compiler flags +# Figure out Intel oneAPI FC compiler flags . $srcdir/config/intel-fflags +# Figure out Intel classic FC compiler flags +. $srcdir/config/oneapi-fflags diff --git a/config/oneapi-cxxflags b/config/oneapi-cxxflags new file mode 100644 index 00000000000..d9819b94c44 --- /dev/null +++ b/config/oneapi-cxxflags @@ -0,0 +1,155 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# Intel icpx compiler or a derivative. It is careful not to do anything +# if the compiler is not Intel; otherwise `cxx_flags_set' is set to `yes' +# + +# +# Prepend `$srcdir/config/intel-warnings/` to the filename suffix(es) given as +# subroutine argument(s), remove comments starting with # and ending +# at EOL, replace spans of whitespace (including newlines) with spaces, +# and re-emit the file(s) thus filtered on the standard output stream. 
+# +load_intel_arguments() +{ + set -- $(for arg; do + sed 's,#.*$,,' $srcdir/config/intel-warnings/${arg} + done) + IFS=' ' echo "$*" +} + +# Get the compiler version in a way that works for icpx +# icpx unless a compiler version is already known +# +# cxx_vendor: The compiler name: icpx +# cxx_version: Version number: 2023.2.0 +# +if test X = "X$cxx_flags_set"; then + cxx_version="`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 |grep 'oneAPI'`" + if test X != "X$cxx_version"; then + cxx_vendor=icpx + cxx_version=`echo $cxx_version |sed 's/.*Version \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CXX' is Intel oneAPI $cxx_vendor-$cxx_version" + + # Some version numbers + # Intel oneAPI version numbers are of the form: "major.minor.patch" + cxx_vers_major=`echo $cxx_version | cut -f1 -d.` + cxx_vers_minor=`echo $cxx_version | cut -f2 -d.` + cxx_vers_patch=`echo $cxx_version | cut -f2 -d.` + test -n "$cxx_vers_major" || cxx_vers_major=0 + test -n "$cxx_vers_minor" || cxx_vers_minor=0 + test -n "$cxx_vers_patch" || cxx_vers_patch=0 + cxx_vers_all=`expr $cxx_vers_major '*' 1000000 + $cxx_vers_minor '*' 1000 + $cxx_vers_patch` + fi +fi + +# Common Intel flags for various situations +if test "X-icpx" = "X-$cxx_vendor"; then + # Insert section about version specific problems from compiler flags here, + # if necessary. + + arch= + # Architecture-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ########### + # General # + ########### + + # Default to C++11 standard + H5_CXXFLAGS="$H5_CXXFLAGS $arch -std=c++11" + + ############## + # Production # + ############## + + PROD_CXXFLAGS= + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly in configure + # -g is handled by the symbols flags + DEBUG_CXXFLAGS= + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CXXFLAGS="-Wl,-s" + SYMBOLS_CXXFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CXXFLAGS="-p" + + ################ + # Optimization # + ################ + + HIGH_OPT_CXXFLAGS="-O3" + DEBUG_OPT_CXXFLAGS="-O0" + NO_OPT_CXXFLAGS="-O0" + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + # Add various general warning flags in intel-warnings. + # Use the C warnings as CXX warnings are the same + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments oneapi/general)" + + ###################### + # Developer warnings # + ###################### + + # Use the C warnings as CXX warnings are the same + DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments oneapi/developer-general) + + ############################# + # Version-specific warnings # + ############################# + + ################# + # Flags are set # + ################# + cxx_flags_set=yes +fi + +# Clear cxx info if no flags set +if test "X-$cxx_flags_set" = "X-"; then + cxx_vendor= + cxx_version= +fi diff --git a/config/oneapi-fflags b/config/oneapi-fflags new file mode 100644 index 00000000000..a63108d0b99 --- /dev/null +++ b/config/oneapi-fflags @@ -0,0 +1,145 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. 
The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# Intel oneAPI ifx compiler or a derivative. It is careful not to do anything +# if the compiler is not Intel; otherwise `f9x_flags_set' is set to `yes' +# + +# +# Prepend `$srcdir/config/intel-warnings/` to the filename suffix(es) given as +# subroutine argument(s), remove comments starting with # and ending +# at EOL, replace spans of whitespace (including newlines) with spaces, +# and re-emit the file(s) thus filtered on the standard output stream. +# +load_intel_arguments() +{ + set -- $(for arg; do + sed 's,#.*$,,' $srcdir/config/intel-warnings/${arg} + done) + IFS=' ' echo "$*" +} + +# Get the compiler version in a way that works for ifx +# ifx unless a compiler version is already known +# +# f9x_vendor: The compiler name: ifx +# f9x_version: Version number: 2023.2.0 +# +if test X = "X$f9x_flags_set"; then + f9x_version="`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 |grep '^Intel'`" + if test X != "X$f9x_version"; then + f9x_vendor=ifx + f9x_version="`echo $f9x_version |sed 's/.*Version \([-a-z0-9\.\-]*\).*/\1/'`" + echo "compiler '$FC' is Intel oneAPI $f9x_vendor-$f9x_version" + + # Some version numbers + # Intel oneAPI version numbers are of the form: "major.minor.patch" + f9x_vers_major=`echo $f9x_version | cut -f1 -d.` + f9x_vers_minor=`echo $f9x_version | cut -f2 -d.` + f9x_vers_patch=`echo $f9x_version | cut -f2 -d.` + test -n "$f9x_vers_major" || f9x_vers_major=0 + test -n "$f9x_vers_minor" || f9x_vers_minor=0 + test -n "$f9x_vers_patch" || f9x_vers_patch=0 + f9x_vers_all=`expr $f9x_vers_major '*' 1000000 + $f9x_vers_minor '*' 1000 + $f9x_vers_patch` + fi +fi + +if test "X-ifx" = "X-$f9x_vendor"; then + + FC_BASENAME=ifx + F9XSUFFIXFLAG="" + FSEARCH_DIRS="" + + ############################### + # Architecture-specific flags # + ############################### + + arch= + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. 
(Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ############## + # Production # + ############## + + PROD_FCFLAGS= + + ######### + # Debug # + ######### + + # Don't use -check uninit or you'll get false positives from H5_buildiface + DEBUG_FCFLAGS="-check all,nouninit" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_FCFLAGS= + SYMBOLS_FCFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_FCFLAGS="-p" + + ################ + # Optimization # + ################ + + HIGH_OPT_FCFLAGS="-O3" + DEBUG_OPT_FCFLAGS="-O0" + NO_OPT_FCFLAGS="-O0" + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + H5_FCFLAGS="$H5_FCFLAGS -free" + H5_FCFLAGS="$H5_FCFLAGS $(load_intel_arguments oneapi/ifort-general)" + + ############################# + # Version-specific warnings # + ############################# + + + ################# + # Flags are set # + ################# + f9x_flags_set=yes +fi + +# Clear f9x info if no flags set +if test "X-$f9x_flags_set" = "X-"; then + f9x_vendor= + f9x_version= +fi + diff --git a/config/oneapi-flags b/config/oneapi-flags new file mode 100644 index 00000000000..629e93f02f1 --- /dev/null +++ b/config/oneapi-flags @@ -0,0 +1,151 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# Intel icx compiler or a derivative. It is careful not to do anything +# if the compiler is not Intel; otherwise `cc_flags_set' is set to `yes' +# + +# +# Prepend `$srcdir/config/intel-warnings/` to the filename suffix(es) given as +# subroutine argument(s), remove comments starting with # and ending +# at EOL, replace spans of whitespace (including newlines) with spaces, +# and re-emit the file(s) thus filtered on the standard output stream. 
+# +load_intel_arguments() +{ + set -- $(for arg; do + sed 's,#.*$,,' $srcdir/config/intel-warnings/${arg} + done) + IFS=' ' echo "$*" +} + +# Get the compiler version in a way that works for icx +# icx unless a compiler version is already known +# cc_vendor: The compiler name: icx +# cc_version: Version number: 2023.2.0 +# +if test X = "X$cc_flags_set"; then + cc_version="`$CC $CFLAGS $H5_CFLAGS -V 2>&1 |grep 'oneAPI'`" + if test X != "X$cc_version"; then + cc_vendor=icx + cc_version=`echo $cc_version |sed 's/.*Version \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CC' is Intel oneAPI $cc_vendor-$cc_version" + + # Some version numbers + # Intel oneAPI version numbers are of the form: "major.minor.patch" + cc_vers_major=`echo $cc_version | cut -f1 -d.` + cc_vers_minor=`echo $cc_version | cut -f2 -d.` + cc_vers_patch=`echo $cc_version | cut -f2 -d.` + test -n "$cc_vers_major" || cc_vers_major=0 + test -n "$cc_vers_minor" || cc_vers_minor=0 + test -n "$cc_vers_patch" || cc_vers_patch=0 + cc_vers_all=`expr $cc_vers_major '*' 1000000 + $cc_vers_minor '*' 1000 + $cc_vers_patch` + fi +fi + +# Common Intel flags for various situations +if test "X-icx" = "X-$cc_vendor"; then + # Insert section about version specific problems from compiler flags here, + # if necessary. + + arch= + # Architecture-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ########### + # General # + ########### + + # Default to C99 standard. + H5_CFLAGS="$H5_CFLAGS $arch -std=c99" + + ############## + # Production # + ############## + + PROD_CFLAGS= + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly in configure + DEBUG_CFLAGS= + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CFLAGS="-Wl,-s" + SYMBOLS_CFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CFLAGS="-p" + + ################ + # Optimization # + ################ + + HIGH_OPT_CFLAGS="-O3" + DEBUG_OPT_CFLAGS="-O0" + NO_OPT_CFLAGS="-O0" + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + # Add various general warning flags in intel-warnings. + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments oneapi/general)" + + ###################### + # Developer warnings # + ###################### + + DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments oneapi/developer-general) + + ############################# + # Version-specific warnings # + ############################# + + ################# + # Flags are set # + ################# + cc_flags_set=yes +fi + +# Clear cc info if no flags set +if test "X-$cc_flags_set" = "X-"; then + cc_vendor= + cc_version= +fi diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 19c7831862a..9643cf9ecf6 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,14 @@ New Features Configuration: ------------- + - Improved support for Intel oneAPI + + * Separates the old 'classic' Intel compiler settings and warnings + from the oneAPI settings + * Uses `-check nouninit` in debug builds to avoid false positives + when building H5_buildiface with `-check all` + * Both Autotools and CMake + - Added new options for CMake and Autotools to control the Doxygen warnings as errors setting. 
From da3b7ff945c931878df85bca49b9c21a89e5a824 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 23 Oct 2023 14:55:47 -0500 Subject: [PATCH 050/101] Add h5pget_actual_selection_io_mode fortran wrapper (#3746) * added h5pget_actual_selection_io_mode_f test * added tests for h5pget_actual_selection_io_mode_f * fixed int_f type conversion --- fortran/src/H5Pff.F90 | 38 ++++++++++++++++++++++++++++++++++++- fortran/src/H5_f.c | 4 ++++ fortran/src/H5_ff.F90 | 5 ++++- fortran/src/H5f90global.F90 | 10 ++++++++++ fortran/test/tH5P.F90 | 5 +++++ fortran/testpar/hyper.F90 | 15 +++++++++++++++ release_docs/RELEASE.txt | 3 ++- 7 files changed, 77 insertions(+), 3 deletions(-) diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index bbc7a9dc066..5821889c3e9 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -6405,7 +6405,7 @@ END SUBROUTINE H5Pset_file_space_strategy_f !! \brief Gets the file space handling strategy and persisting free-space values for a file creation property list. !! !! \param plist_id File creation property list identifier -!! \param strategy The file space handling strategy to be used. +!! \param strategy The file space handling strategy to be used !! \param persist Indicate whether free space should be persistent or not !! \param threshold The free-space section size threshold value !! \param hdferr \fortran_error @@ -6507,6 +6507,42 @@ END FUNCTION H5Pget_file_space_page_size hdferr = INT(h5pget_file_space_page_size(prp_id, fsp_size)) END SUBROUTINE h5pget_file_space_page_size_f +!> +!! \ingroup FH5P +!! +!! \brief Retrieves the type(s) of I/O that HDF5 actually performed on raw data +!! during the last I/O call. +!! +!! \param plist_id File creation property list identifier +!! \param actual_selection_io_mode A bitwise set value indicating the type(s) of I/O performed +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Pget_actual_selection_io_mode() +!! + SUBROUTINE h5pget_actual_selection_io_mode_f(plist_id, actual_selection_io_mode, hdferr) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: plist_id + INTEGER , INTENT(OUT) :: actual_selection_io_mode + INTEGER , INTENT(OUT) :: hdferr + + INTEGER(C_INT32_T) :: c_actual_selection_io_mode + + INTERFACE + INTEGER(C_INT) FUNCTION H5Pget_actual_selection_io_mode(plist_id, actual_selection_io_mode) & + BIND(C, NAME='H5Pget_actual_selection_io_mode') + IMPORT :: HID_T, C_INT32_T, C_INT + IMPLICIT NONE + INTEGER(HID_T), VALUE :: plist_id + INTEGER(C_INT32_T) :: actual_selection_io_mode + END FUNCTION H5Pget_actual_selection_io_mode + END INTERFACE + + hdferr = INT(H5Pget_actual_selection_io_mode(plist_id, c_actual_selection_io_mode)) + + actual_selection_io_mode = INT(c_actual_selection_io_mode) + + END SUBROUTINE h5pget_actual_selection_io_mode_f END MODULE H5P diff --git a/fortran/src/H5_f.c b/fortran/src/H5_f.c index 181047b5454..0392c2bdfa6 100644 --- a/fortran/src/H5_f.c +++ b/fortran/src/H5_f.c @@ -477,6 +477,10 @@ h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid h5d_flags[55] = (int_f)H5D_MPIO_LINK_CHUNK; h5d_flags[56] = (int_f)H5D_MPIO_MULTI_CHUNK; + h5d_flags[57] = (int_f)H5D_SCALAR_IO; + h5d_flags[58] = (int_f)H5D_VECTOR_IO; + h5d_flags[59] = (int_f)H5D_SELECTION_IO; + /* * H5E flags */ diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index 68b3dd874bb..53156731946 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -74,7 +74,7 @@ MODULE H5LIB ! ! H5D flags declaration ! 
- INTEGER, PARAMETER :: H5D_FLAGS_LEN = 57 + INTEGER, PARAMETER :: H5D_FLAGS_LEN = 60 INTEGER, DIMENSION(1:H5D_FLAGS_LEN) :: H5D_flags INTEGER, PARAMETER :: H5D_SIZE_FLAGS_LEN = 2 INTEGER(SIZE_T), DIMENSION(1:H5D_SIZE_FLAGS_LEN) :: H5D_size_flags @@ -467,6 +467,9 @@ END FUNCTION h5init1_flags_c H5D_MPIO_NO_CHUNK_OPTIMIZATION_F = H5D_flags(55) H5D_MPIO_LINK_CHUNK_F = H5D_flags(56) H5D_MPIO_MULTI_CHUNK_F = H5D_flags(57) + H5D_SCALAR_IO_F = H5D_flags(58) + H5D_VECTOR_IO_F = H5D_flags(59) + H5D_SELECTION_IO_F = H5D_flags(60) H5D_CHUNK_CACHE_NSLOTS_DFLT_F = H5D_size_flags(1) H5D_CHUNK_CACHE_NBYTES_DFLT_F = H5D_size_flags(2) diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 index f6c06cbc056..aa046235eb3 100644 --- a/fortran/src/H5f90global.F90 +++ b/fortran/src/H5f90global.F90 @@ -374,6 +374,12 @@ MODULE H5GLOBAL !DEC$ATTRIBUTES DLLEXPORT :: H5D_MPIO_NO_CHUNK_OPTIMIZATION_F !DEC$ATTRIBUTES DLLEXPORT :: H5D_MPIO_LINK_CHUNK_F !DEC$ATTRIBUTES DLLEXPORT :: H5D_MPIO_MULTI_CHUNK_F + + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SCALAR_IO_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_VECTOR_IO_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SELECTION_IO_F + + !DEC$endif !> \addtogroup FH5D !> @{ @@ -450,6 +456,10 @@ MODULE H5GLOBAL INTEGER :: H5D_MPIO_NO_CHUNK_OPTIMIZATION_F !< H5D_MPIO_NO_CHUNK_OPTIMIZATION INTEGER :: H5D_MPIO_LINK_CHUNK_F !< H5D_MPIO_LINK_CHUNK INTEGER :: H5D_MPIO_MULTI_CHUNK_F !< H5D_MPIO_MULTI_CHUNK + + INTEGER :: H5D_SCALAR_IO_F !< Scalar (or legacy MPIO) I/O was performed + INTEGER :: H5D_VECTOR_IO_F !< Vector I/O was performed + INTEGER :: H5D_SELECTION_IO_F !< Selection I/O was performed ! ! H5E flags declaration ! diff --git a/fortran/test/tH5P.F90 b/fortran/test/tH5P.F90 index c73016bc7cf..78d665f0aa1 100644 --- a/fortran/test/tH5P.F90 +++ b/fortran/test/tH5P.F90 @@ -869,6 +869,7 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) REAL(KIND=C_DOUBLE), DIMENSION(1:array_len) :: wbuf_d_org REAL(KIND=C_FLOAT), DIMENSION(1:array_len), TARGET :: rbuf INTEGER :: i + INTEGER :: actual_selection_io_mode TYPE(C_PTR) :: f_ptr ! create the data @@ -919,6 +920,10 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) ! Should not be equal for in-place buffer use CALL VERIFY("h5dwrite_f -- in-place", wbuf_d(1), wbuf_d_org(1), total_error, .FALSE.) + CALL h5pget_actual_selection_io_mode_f(plist_id, actual_selection_io_mode, error) + CALL check("h5pget_actual_selection_io_mode_f", error, total_error) + CALL VERIFY("h5pget_actual_selection_io_mode_f", actual_selection_io_mode, H5D_SCALAR_IO_F, total_error) + f_ptr = C_LOC(rbuf) CALL h5dread_f(dset_id, h5kind_to_type(KIND(rbuf(1)), H5_REAL_KIND), f_ptr, error) CALL check("h5dread_f", error, total_error) diff --git a/fortran/testpar/hyper.F90 b/fortran/testpar/hyper.F90 index edd93cf9b8f..ec3a657afbb 100644 --- a/fortran/testpar/hyper.F90 +++ b/fortran/testpar/hyper.F90 @@ -55,6 +55,7 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors) INTEGER :: local_no_collective_cause INTEGER :: global_no_collective_cause INTEGER :: no_selection_io_cause + INTEGER :: actual_selection_io_mode ! ! 
initialize the array data between the processes (3) @@ -236,6 +237,20 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors) CALL h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) CALL check("h5dwrite_f", hdferror, nerrors) + CALL h5pget_actual_selection_io_mode_f(dxpl_id, actual_selection_io_mode, hdferror) + CALL check("h5pget_actual_selection_io_mode_f", hdferror, nerrors) + IF(do_collective)THEN + IF(actual_selection_io_mode .NE. H5D_SELECTION_IO_F)THEN + PRINT*, "Incorrect actual selection io mode" + nerrors = nerrors + 1 + ENDIF + ELSE + IF(actual_selection_io_mode .NE. IOR(H5D_SELECTION_IO_F, H5D_SCALAR_IO_F))THEN + PRINT*, "Incorrect actual selection io mode" + nerrors = nerrors + 1 + ENDIF + ENDIF + ! Check h5pget_mpio_actual_io_mode_f function CALL h5pget_mpio_actual_io_mode_f(dxpl_id, actual_io_mode, hdferror) CALL check("h5pget_mpio_actual_io_mode_f", hdferror, nerrors) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 9643cf9ecf6..a5bcb94e807 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -332,7 +332,8 @@ New Features - Fortran async APIs H5A, H5D, H5ES, H5G, H5F, H5L and H5O were added. - Added Fortran APIs: - h5pset_selection_io_f, h5pget_selection_io_f + h5pset_selection_io_f, h5pget_selection_io_f, + h5pget_actual_selection_io_mode_f, h5pset_modify_write_buf_f, h5pget_modify_write_buf_f - Added Fortran APIs: From b411852e982e849ffa0602d640b5af4d47026fbb Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 23 Oct 2023 15:01:10 -0500 Subject: [PATCH 051/101] Update fortran action step (#3748) --- .github/workflows/cmake-ctest.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml index da99e7e02cc..ee4bcff4de3 100644 --- a/.github/workflows/cmake-ctest.yml +++ b/.github/workflows/cmake-ctest.yml @@ -191,8 +191,12 @@ jobs: # symlinks the compiler executables to a common location - name: Setup GNU Fortran - uses: modflowpy/install-gfortran-action@v1 - + uses: fortran-lang/setup-fortran@v1 + id: setup-fortran + with: + compiler: gcc + version: 12 + - name: Run ctest (MacOS) id: run-ctest run: | From 207d749a2810bd30ecc4321609c83fe51adbd0c3 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Mon, 23 Oct 2023 19:02:16 -0500 Subject: [PATCH 052/101] Added missing DLL for H5PGET_ACTUAL_SELECTION_IO_MODE_F (#3760) * add missing H5PGET_ACTUAL_SELECTION_IO_MODE_F dll --- fortran/src/hdf5_fortrandll.def.in | 1 + 1 file changed, 1 insertion(+) diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index 3b6600c061e..2ded00222ba 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -417,6 +417,7 @@ H5P_mp_H5PSET_FILE_SPACE_STRATEGY_F H5P_mp_H5PGET_FILE_SPACE_STRATEGY_F H5P_mp_H5PSET_FILE_SPACE_PAGE_SIZE_F H5P_mp_H5PGET_FILE_SPACE_PAGE_SIZE_F +H5P_mp_H5PGET_ACTUAL_SELECTION_IO_MODE_F ; Parallel @H5_NOPAREXP@H5P_mp_H5PSET_FAPL_MPIO_F @H5_NOPAREXP@H5P_mp_H5PGET_FAPL_MPIO_F From ceb03358a1d713078ae36bfff07be62b433d970a Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 23 Oct 2023 19:06:18 -0700 Subject: [PATCH 053/101] Bump the ros3 VFD cache to 16 MiB (#3759) --- release_docs/RELEASE.txt | 4 ++-- src/H5FDros3.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index a5bcb94e807..222c2774057 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -236,13 +236,13 @@ New Features Library: -------- - - Added a simple cache to the read-only S3 VFD + - Added a simple cache to the read-only S3 (ros3) VFD The read-only S3 VFD now caches the first N bytes of a file stored in S3 to avoid a lot of small I/O operations when opening files. This cache is per-file and created when the file is opened. - N is currently 4kiB or the size of the file, whichever is smaller. + N is currently 16 MiB or the size of the file, whichever is smaller. Addresses GitHub issue #3381 diff --git a/src/H5FDros3.c b/src/H5FDros3.c index f704eff1efa..c6aea0e327a 100644 --- a/src/H5FDros3.c +++ b/src/H5FDros3.c @@ -44,7 +44,7 @@ #define ROS3_STATS 0 /* Max size of the cache, in bytes */ -#define ROS3_MAX_CACHE_SIZE 4096 +#define ROS3_MAX_CACHE_SIZE 16777216 /* The driver identification number, initialized at runtime */ From af49eb5b8647e8d9ffb527fd533def0910eb535c Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 23 Oct 2023 21:06:44 -0500 Subject: [PATCH 054/101] Fix hangs during collective I/O with independent metadata writes (#3693) --- release_docs/RELEASE.txt | 19 ++++++++ src/H5Cmpio.c | 38 +++++++++++++-- src/H5Pfapl.c | 2 +- testpar/t_coll_md.c | 103 +++++++++++++++++++++++++++++++++++++++ testpar/testphdf5.c | 2 + testpar/testphdf5.h | 1 + 6 files changed, 160 insertions(+), 5 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 222c2774057..e5d53e41cbf 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -392,6 +392,25 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed potential hangs in parallel library during collective I/O with + independent metadata writes + + When performing collective parallel writes to a dataset where metadata + writes are requested as (or left as the default setting of) independent, + hangs could potentially occur during metadata cache sync points. This + was due to incorrect management of the internal state tracking whether + an I/O operation should be collective or not, causing the library to + attempt collective writes of metadata when they were meant to be + independent writes. During the metadata cache sync points, if the number + of cache entries being flushed was a multiple of the number of MPI ranks + in the MPI communicator used to access the HDF5 file, an equal amount of + collective MPI I/O calls were made and the dataset write call would be + successful. However, when the number of cache entries being flushed was + NOT a multiple of the number of MPI ranks, the ranks with more entries + than others would get stuck in an MPI_File_set_view call, while other + ranks would get stuck in a post-write MPI_Barrier call. This issue has + been fixed by correctly switching to independent I/O temporarily when + writing metadata independently during collective dataset I/O. 
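      A minimal C sketch (not taken from this patch; the file and dataset
      names, sizes, and the single write call are illustrative assumptions,
      and error checking is omitted) of the property-list combination
      described above: collective raw-data I/O requested on the transfer
      property list while metadata writes are left at their independent
      default.

          #include <mpi.h>
          #include <hdf5.h>

          int
          main(int argc, char **argv)
          {
              MPI_Init(&argc, &argv);

              /* File access: MPI-IO driver; metadata writes stay independent (the default) */
              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
              H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
              H5Pset_coll_metadata_write(fapl, 0); /* default value, shown for clarity */

              hid_t file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

              /* Chunked dataset so the metadata cache has entries to flush at sync points */
              hsize_t dims[1] = {1024}, chunk[1] = {16};
              hid_t space = H5Screate_simple(1, dims, NULL);
              hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
              H5Pset_chunk(dcpl, 1, chunk);
              hid_t dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                                      H5P_DEFAULT, dcpl, H5P_DEFAULT);

              /* Raw data transfer requested as collective */
              hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
              H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

              /* Every rank writes the whole dataset purely to keep the sketch short */
              int buf[1024] = {0};
              H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);

              H5Pclose(dxpl);
              H5Dclose(dset);
              H5Pclose(dcpl);
              H5Sclose(space);
              H5Fclose(file);
              H5Pclose(fapl);

              MPI_Finalize();
              return 0;
          }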
- Dropped support for MPI-2 diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 643bbc80207..c8db5352ff6 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -154,8 +154,9 @@ herr_t H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size) { - unsigned first_entry_to_flush; - unsigned last_entry_to_flush; + H5FD_mpio_xfer_t orig_xfer_mode; + unsigned first_entry_to_flush; + unsigned last_entry_to_flush; #ifndef NDEBUG unsigned total_entries_to_clear = 0; unsigned total_entries_to_flush = 0; @@ -172,8 +173,9 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha char *tbl_buf = NULL; #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ unsigned m, n; - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + unsigned u; /* Local index variable */ + bool restore_io_mode = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -185,6 +187,10 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha assert(0 <= mpi_rank); assert(mpi_rank < mpi_size); + /* Get I/O transfer mode */ + if (H5CX_get_io_xfer_mode(&orig_xfer_mode) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode"); + /* Initialize the entries_to_flush and entries_to_clear arrays */ memset(entries_to_flush, 0, sizeof(entries_to_flush)); memset(entries_to_clear, 0, sizeof(entries_to_clear)); @@ -418,6 +424,19 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha num_candidates, total_entries_to_clear, total_entries_to_flush); #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ + /* + * If collective I/O was requested, but collective metadata + * writes were not requested, temporarily disable collective + * I/O while flushing candidate entries so that we don't cause + * a hang in the case where the number of candidate entries + * to flush isn't a multiple of mpi_size. + */ + if ((orig_xfer_mode == H5FD_MPIO_COLLECTIVE) && !f->shared->coll_md_write) { + if (H5CX_set_io_xfer_mode(H5FD_MPIO_INDEPENDENT) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode"); + restore_io_mode = true; + } + /* We have now marked all the entries on the candidate list for * either flush or clear -- now scan the LRU and the pinned list * for these entries and do the deed. 
Do this via a call to @@ -431,6 +450,13 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha if (H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed"); + /* Restore collective I/O if we temporarily disabled it */ + if (restore_io_mode) { + if (H5CX_set_io_xfer_mode(orig_xfer_mode) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode"); + restore_io_mode = false; + } + /* If we've deferred writing to do it collectively, take care of that now */ if (f->shared->coll_md_write) { /* Sanity check */ @@ -442,6 +468,10 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha } /* end if */ done: + /* Restore collective I/O if we temporarily disabled it */ + if (restore_io_mode && (H5CX_set_io_xfer_mode(orig_xfer_mode) < 0)) + HDONE_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode"); + if (candidate_assignment_table != NULL) candidate_assignment_table = (unsigned *)H5MM_xfree((void *)candidate_assignment_table); if (cache_ptr->coll_write_list) { diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 5f5782cae3b..dc122af9393 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -5174,7 +5174,7 @@ H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collective /*out*/) * Function: H5Pset_coll_metadata_write * * Purpose: Tell the library whether the metadata write operations will - * be done collectively (1) or not (0). Default is collective. + * be done collectively (1) or not (0). Default is independent. * * Return: Non-negative on success/Negative on failure * diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c index 1220111a56d..9c6fc7120cf 100644 --- a/testpar/t_coll_md.c +++ b/testpar/t_coll_md.c @@ -43,6 +43,11 @@ #define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr" #define COLL_GHEAP_WRITE_ATTR_DIMS 1 +#define COLL_IO_IND_MD_WRITE_NDIMS 2 +#define COLL_IO_IND_MD_WRITE_CHUNK0 4 +#define COLL_IO_IND_MD_WRITE_CHUNK1 256 +#define COLL_IO_IND_MD_WRITE_NCHUNK1 16384 + /* * A test for issue HDFFV-10501. A parallel hang was reported which occurred * in linked-chunk I/O when collective metadata reads are enabled and some ranks @@ -569,3 +574,101 @@ test_collective_global_heap_write(void) VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); } + +/* + * A test to ensure that hangs don't occur when collective I/O + * is requested at the interface level (by a call to + * H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)), while + * collective metadata writes are NOT requested. 
+ */ +void +test_coll_io_ind_md_write(void) +{ + const char *filename; + long long *data = NULL; + hsize_t dset_dims[COLL_IO_IND_MD_WRITE_NDIMS]; + hsize_t chunk_dims[COLL_IO_IND_MD_WRITE_NDIMS]; + hsize_t sel_dims[COLL_IO_IND_MD_WRITE_NDIMS]; + hsize_t offset[COLL_IO_IND_MD_WRITE_NDIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_id2 = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + int mpi_rank, mpi_size; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = GetTestParameters(); + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, false) >= 0), "Unset collective metadata reads succeeded"); + VRFY((H5Pset_coll_metadata_write(fapl_id, false) >= 0), "Unset collective metadata writes succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + dset_dims[0] = (hsize_t)(mpi_size * COLL_IO_IND_MD_WRITE_CHUNK0); + dset_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1 * COLL_IO_IND_MD_WRITE_NCHUNK1); + + fspace_id = H5Screate_simple(COLL_IO_IND_MD_WRITE_NDIMS, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + chunk_dims[0] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK0); + chunk_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1); + + VRFY((H5Pset_chunk(dcpl_id, COLL_IO_IND_MD_WRITE_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded"); + + VRFY((H5Pset_shuffle(dcpl_id) >= 0), "H5Pset_shuffle succeeded"); + + dset_id = H5Dcreate2(file_id, "dset1", H5T_NATIVE_LLONG, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + sel_dims[0] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK0); + sel_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1 * COLL_IO_IND_MD_WRITE_NCHUNK1); + + offset[0] = (hsize_t)mpi_rank * sel_dims[0]; + offset[1] = 0; + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, sel_dims, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + data = malloc(sel_dims[0] * sel_dims[1] * sizeof(long long)); + for (size_t i = 0; i < sel_dims[0] * sel_dims[1]; i++) + data[i] = rand(); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_LLONG, H5S_BLOCK, fspace_id, dxpl_id, data) >= 0), + "H5Dwrite succeeded"); + + dset_id2 = H5Dcreate2(file_id, "dset2", H5T_NATIVE_LLONG, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id2 >= 0), "H5Dcreate2 succeeded"); + + for (size_t i = 0; i < sel_dims[0] * sel_dims[1]; i++) + data[i] = rand(); + + VRFY((H5Dwrite(dset_id2, H5T_NATIVE_LLONG, H5S_BLOCK, fspace_id, dxpl_id, data) >= 0), + "H5Dwrite succeeded"); + + free(data); + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Dclose(dset_id2) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} diff 
--git a/testpar/testphdf5.c b/testpar/testphdf5.c index 584ca1f6107..2d85e1ae289 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -521,6 +521,8 @@ main(int argc, char **argv) "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); AddTest("GH_coll_MD_wr", test_collective_global_heap_write, NULL, "Collective MD write of global heap data", PARATESTFILE); + AddTest("COLLIO_INDMDWR", test_coll_io_ind_md_write, NULL, + "Collective I/O with Independent metadata writes", PARATESTFILE); /* Display testing information */ TestInfo(argv[0]); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 6ac8080c82a..5699760c61b 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -296,6 +296,7 @@ void test_partial_no_selection_coll_md_read(void); void test_multi_chunk_io_addrmap_issue(void); void test_link_chunk_io_sort_chunk_issue(void); void test_collective_global_heap_write(void); +void test_coll_io_ind_md_write(void); void test_oflush(void); /* commonly used prototypes */ From 8cff88c5073b0bbd66c5876e0ce3da16214af39c Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 23 Oct 2023 21:08:06 -0500 Subject: [PATCH 055/101] Fix some issues with collective metadata reads for chunked datasets (#3716) Add functions/callbacks for explicit control over chunk index open/close Add functions/callbacks to check if chunk index is open or not so that it can be opened if necessary before temporarily disabling collective metadata reads in the library Add functions/callbacks for requesting loading of additional chunk index metadata beyond the chunk index itself --- release_docs/RELEASE.txt | 40 ++++ src/H5Dbtree.c | 442 +++++++++++++++++++++--------------- src/H5Dbtree2.c | 467 ++++++++++++++++++++++++--------------- src/H5Dchunk.c | 63 ++++-- src/H5Dearray.c | 271 ++++++++++++++++------- src/H5Dfarray.c | 234 ++++++++++++++------ src/H5Dmpio.c | 40 ++-- src/H5Dnone.c | 170 ++++++++++---- src/H5Dpkg.h | 21 +- src/H5Dsingle.c | 168 ++++++++++---- 10 files changed, 1292 insertions(+), 624 deletions(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index e5d53e41cbf..e5cb09707bd 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -392,6 +392,46 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed some issues with chunk index metadata not getting read + collectively when collective metadata reads are enabled + + When looking up dataset chunks during I/O, the parallel library + temporarily disables collective metadata reads since it's generally + unlikely that the application will read the same chunks from all + MPI ranks. Leaving collective metadata reads enabled during + chunk lookups can lead to hangs or other bad behavior depending + on the chunk indexing structure used for the dataset in question. + However, due to the way that dataset chunk index metadata was + previously loaded in a deferred manner, this could mean that + the metadata for the main chunk index structure or its + accompanying pieces of metadata (e.g., fixed array data blocks) + could end up being read independently if these chunk lookup + operations are the first chunk index-related operation that + occurs on a dataset. This behavior is generally observed when + opening a dataset for which the metadata isn't in the metadata + cache yet and then immediately performing I/O on that dataset. 
+ This behavior is not generally observed when creating a dataset + and then performing I/O on it, as the relevant metadata will + usually be in the metadata cache as a side effect of creating + the chunk index structures during dataset creation. + + This issue has been fixed by adding callbacks to the different + chunk indexing structure classes that allow more explicit control + over when chunk index metadata gets loaded. When collective + metadata reads are enabled, the necessary index metadata will now + get loaded collectively by all MPI ranks at the start of dataset + I/O to ensure that the ranks don't unintentionally read this + metadata independently further on. These changes fix collective + loading of the main chunk index structure, as well as v2 B-tree + root nodes, extensible array index blocks and fixed array data + blocks. There are still pieces of metadata that cannot currently + be loaded collectively, however, such as extensible array data + blocks, data block pages and super blocks, as well as fixed array + data block pages. These pieces of metadata are not necessarily + read in by all MPI ranks since this depends on which chunks the + ranks have selected in the dataset. Therefore, reading of these + pieces of metadata remains an independent operation. + - Fixed potential hangs in parallel library during collective I/O with independent metadata writes diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index d79f7d0b031..4f8a867974e 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -24,30 +24,32 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Bprivate.h" /* B-link trees */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* Files */ -#include "H5FDprivate.h" /* File drivers */ +#include "H5private.h" /* Generic Functions */ +#include "H5Bprivate.h" /* B-link trees */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fprivate.h" /* Files */ +#include "H5FDprivate.h" /* File drivers */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5Oprivate.h" /* Object headers */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MFprivate.h" /* File space management */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5Oprivate.h" /* Object headers */ #include "H5Sprivate.h" /* Dataspaces */ -#include "H5VMprivate.h" /* Vector and array functions */ +#include "H5VMprivate.h" /* Vector and array functions */ /****************/ /* Local Macros */ /****************/ +#define H5D_BTREE_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree.shared) + /******************/ /* Local Typedefs */ /******************/ /* - * B-tree key. A key contains the minimum logical N-dimensional coordinates and + * B-tree key. A key contains the minimum logical N-dimensional coordinates and * the logical size of the chunk to which this key refers. The * fastest-varying dimension is assumed to reference individual bytes of the * array, so a 100-element 1-d array of 4-byte integers would really be a 2-d @@ -61,9 +63,9 @@ * The chunk's file address is part of the B-tree and not part of the key. 
*/ typedef struct H5D_btree_key_t { - hsize_t scaled[H5O_LAYOUT_NDIMS]; /*logical offset to start*/ - uint32_t nbytes; /*size of stored data */ - unsigned filter_mask; /*excluded filters */ + hsize_t scaled[H5O_LAYOUT_NDIMS]; /*logical offset to start */ + uint32_t nbytes; /*size of stored data */ + unsigned filter_mask; /*excluded filters */ } H5D_btree_key_t; /* B-tree callback info for iteration over chunks */ @@ -111,10 +113,14 @@ static herr_t H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const v static herr_t H5D__btree_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__btree_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__btree_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__btree_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__btree_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -137,9 +143,13 @@ const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{ false, /* v1 B-tree indices does not support SWMR access */ H5D__btree_idx_init, /* insert */ H5D__btree_idx_create, /* create */ + H5D__btree_idx_open, /* open */ + H5D__btree_idx_close, /* close */ + H5D__btree_idx_is_open, /* is_open */ H5D__btree_idx_is_space_alloc, /* is_space_alloc */ H5D__btree_idx_insert, /* insert */ H5D__btree_idx_get_addr, /* get_addr */ + H5D__btree_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__btree_idx_iterate, /* iterate */ H5D__btree_idx_remove, /* remove */ @@ -158,21 +168,21 @@ const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{ /* inherits B-tree like properties from H5B */ static H5B_class_t H5B_BTREE[1] = {{ - H5B_CHUNK_ID, /*id */ - sizeof(H5D_btree_key_t), /*sizeof_nkey */ - H5D__btree_get_shared, /*get_shared */ - H5D__btree_new_node, /*new */ - H5D__btree_cmp2, /*cmp2 */ - H5D__btree_cmp3, /*cmp3 */ - H5D__btree_found, /*found */ - H5D__btree_insert, /*insert */ - false, /*follow min branch? */ - false, /*follow max branch? */ - H5B_LEFT, /*critical key */ - H5D__btree_remove, /*remove */ - H5D__btree_decode_key, /*decode */ - H5D__btree_encode_key, /*encode */ - H5D__btree_debug_key /*debug */ + H5B_CHUNK_ID, /* id */ + sizeof(H5D_btree_key_t), /* sizeof_nkey */ + H5D__btree_get_shared, /* get_shared */ + H5D__btree_new_node, /* new */ + H5D__btree_cmp2, /* cmp2 */ + H5D__btree_cmp3, /* cmp3 */ + H5D__btree_found, /* found */ + H5D__btree_insert, /* insert */ + false, /* follow min branch? */ + false, /* follow max branch? 
*/ + H5B_LEFT, /* critical key */ + H5D__btree_remove, /* remove */ + H5D__btree_decode_key, /* decode */ + H5D__btree_encode_key, /* encode */ + H5D__btree_debug_key /* debug */ }}; /*******************/ @@ -183,13 +193,13 @@ static H5B_class_t H5B_BTREE[1] = {{ H5FL_DEFINE_STATIC(H5O_layout_chunk_t); /*------------------------------------------------------------------------- - * Function: H5D__btree_get_shared + * Function: H5D__btree_get_shared * - * Purpose: Returns the shared B-tree info for the specified UDATA. + * Purpose: Returns the shared B-tree info for the specified UDATA. * - * Return: Success: Pointer to the raw B-tree page for this dataset + * Return: Success: Pointer to the raw B-tree page for this dataset * - * Failure: Can't fail + * Failure: Can't fail * *------------------------------------------------------------------------- */ @@ -210,17 +220,17 @@ H5D__btree_get_shared(const H5F_t H5_ATTR_UNUSED *f, const void *_udata) } /* end H5D__btree_get_shared() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_new_node + * Function: H5D__btree_new_node * - * Purpose: Adds a new entry to an i-storage B-tree. We can assume that - * the domain represented by UDATA doesn't intersect the domain - * already represented by the B-tree. + * Purpose: Adds a new entry to an i-storage B-tree. We can assume + * that the domain represented by UDATA doesn't intersect the + * domain already represented by the B-tree. * - * Return: Success: Non-negative. The address of leaf is returned - * through the ADDR argument. It is also added - * to the UDATA. + * Return: Success: Non-negative. The address of leaf is returned + * through the ADDR argument. It is also added + * to the UDATA. * - * Failure: Negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -275,18 +285,18 @@ H5D__btree_new_node(H5F_t H5_ATTR_NDEBUG_UNUSED *f, H5B_ins_t op, void *_lt_key, } /* end H5D__btree_new_node() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_cmp2 + * Function: H5D__btree_cmp2 * - * Purpose: Compares two keys sort of like strcmp(). The UDATA pointer - * is only to supply extra information not carried in the keys - * (in this case, the dimensionality) and is not compared - * against the keys. + * Purpose: Compares two keys sort of like strcmp(). The UDATA pointer + * is only to supply extra information not carried in the keys + * (in this case, the dimensionality) and is not compared + * against the keys. * - * Return: Success: -1 if LT_KEY is less than RT_KEY; - * 1 if LT_KEY is greater than RT_KEY; - * 0 if LT_KEY and RT_KEY are equal. + * Return: Success: -1 if LT_KEY is less than RT_KEY; + * 1 if LT_KEY is greater than RT_KEY; + * 0 if LT_KEY and RT_KEY are equal. 
* - * Failure: FAIL (same as LT_KEYstorage); + assert(H5D_CHUNK_IDX_BTREE == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_BTREE_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__btree_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__btree_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -886,11 +960,11 @@ H5D__btree_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__btree_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_insert + * Function: H5D__btree_idx_insert * - * Purpose: Insert chunk entry into the indexing structure. + * Purpose: Insert chunk entry into the indexing structure. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -922,13 +996,13 @@ H5D__btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, } /* H5D__btree_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_get_addr + * Function: H5D__btree_idx_get_addr * - * Purpose: Get the file address of a chunk if file space has been - * assigned. Save the retrieved information in the udata - * supplied. + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -959,14 +1033,34 @@ H5D__btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udat } /* H5D__btree_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_iterate_cb + * Function: H5D__btree_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. Currently a no-op. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__btree_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__btree_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__btree_idx_iterate_cb * - * Purpose: Translate the B-tree specific chunk record into a generic + * Purpose: Translate the B-tree specific chunk record into a generic * form and make the callback to the generic chunk callback * routine. 
* - * Return: Success: Non-negative - * Failure: Negative + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -1001,12 +1095,12 @@ H5D__btree_idx_iterate_cb(H5F_t H5_ATTR_UNUSED *f, const void *_lt_key, haddr_t } /* H5D__btree_idx_iterate_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_iterate + * Function: H5D__btree_idx_iterate * - * Purpose: Iterate over the chunks in an index, making a callback + * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1043,11 +1137,11 @@ H5D__btree_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t c } /* end H5D__btree_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_remove + * Function: H5D__btree_idx_remove * - * Purpose: Remove chunk from index. + * Purpose: Remove chunk from index. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1077,13 +1171,13 @@ H5D__btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t } /* H5D__btree_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_delete + * Function: H5D__btree_idx_delete * - * Purpose: Delete index and raw data storage for entire dataset + * Purpose: Delete index and raw data storage for entire dataset * (i.e. 
all chunks) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1134,11 +1228,11 @@ H5D__btree_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__btree_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_copy_setup + * Function: H5D__btree_idx_copy_setup * - * Purpose: Set up any necessary information for copying chunks + * Purpose: Set up any necessary information for copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1178,11 +1272,11 @@ H5D__btree_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_ } /* end H5D__btree_idx_copy_setup() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_copy_shutdown + * Function: H5D__btree_idx_copy_shutdown * - * Purpose: Shutdown any information from copying chunks + * Purpose: Shutdown any information from copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1250,11 +1344,11 @@ H5D__btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) } /* end H5D__btree_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_reset + * Function: H5D__btree_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1274,11 +1368,11 @@ H5D__btree_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__btree_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_dump + * Function: H5D__btree_idx_dump * - * Purpose: Dump indexing information to a stream. + * Purpose: Dump indexing information to a stream. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1296,11 +1390,11 @@ H5D__btree_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) } /* end H5D__btree_idx_dump() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_dest + * Function: H5D__btree_idx_dest * - * Purpose: Release indexing information in memory. + * Purpose: Release indexing information in memory. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1328,11 +1422,11 @@ H5D__btree_idx_dest(const H5D_chk_idx_info_t *idx_info) } /* end H5D__btree_idx_dest() */ /*------------------------------------------------------------------------- - * Function: H5D_btree_debug + * Function: H5D_btree_debug * - * Purpose: Debugs a B-tree node for indexed raw data storage. + * Purpose: Debugs a B-tree node for indexed raw data storage. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index 4da9555d0c0..7a26b6d016c 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -27,16 +27,18 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ +#include "H5Dpkg.h" /* Datasets */ #include "H5FLprivate.h" /* Free Lists */ #include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5VMprivate.h" /* Vector and array functions */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5VMprivate.h" /* Vector and array functions */ /****************/ /* Local Macros */ /****************/ +#define H5D_BT2_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree2.bt2) + /******************/ /* Local Typedefs */ /******************/ @@ -92,7 +94,6 @@ static herr_t H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, const vo const void *u_ctx); /* Helper routine */ -static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info); /* Callback for H5B2_iterate() which is called in H5D__bt2_idx_iterate() */ @@ -114,10 +115,14 @@ static herr_t H5D__bt2_mod_cb(void *_record, void *_op_data, bool *changed); static herr_t H5D__bt2_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -139,9 +144,13 @@ const H5D_chunk_ops_t H5D_COPS_BT2[1] = {{ true, /* Fixed array indices support SWMR access */ H5D__bt2_idx_init, /* init */ H5D__bt2_idx_create, /* create */ + H5D__bt2_idx_open, /* open */ + H5D__bt2_idx_close, /* close */ + H5D__bt2_idx_is_open, /* is_open */ H5D__bt2_idx_is_space_alloc, /* is_space_alloc */ H5D__bt2_idx_insert, /* insert */ H5D__bt2_idx_get_addr, /* get_addr */ + H5D__bt2_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__bt2_idx_iterate, /* iterate */ H5D__bt2_idx_remove, /* remove */ @@ -203,8 +212,8 @@ H5FL_ARR_DEFINE_STATIC(uint32_t, H5O_LAYOUT_NDIMS); * * Purpose: Create client callback context * - * Return: Success: non-NULL - * Failure: NULL + * Return: Success: non-NULL + * Failure: NULL * *------------------------------------------------------------------------- */ @@ -258,8 +267,8 @@ H5D__bt2_crt_context(void *_udata) * * Purpose: Destroy client callback context * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * 
*------------------------------------------------------------------------- */ @@ -286,10 +295,10 @@ H5D__bt2_dst_context(void *_ctx) * Function: H5D__bt2_store * * Purpose: Store native information into record for v2 B-tree - * (non-filtered) + * (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -308,8 +317,8 @@ H5D__bt2_store(void *record, const void *_udata) /*------------------------------------------------------------------------- * Function: H5D__bt2_compare * - * Purpose: Compare two native information records, according to some key - * (non-filtered) + * Purpose: Compare two native information records, according to some + * key (non-filtered) * * Return: <0 if rec1 < rec2 * =0 if rec1 == rec2 @@ -341,10 +350,10 @@ H5D__bt2_compare(const void *_udata, const void *_rec2, int *result) * Function: H5D__bt2_unfilt_encode * * Purpose: Encode native information into raw form for storing on disk - * (non-filtered) + * (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -373,10 +382,10 @@ H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx) * Function: H5D__bt2_unfilt_decode * * Purpose: Decode raw disk form of record into native form - * (non-filtered) + * (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -403,12 +412,12 @@ H5D__bt2_unfilt_decode(const uint8_t *raw, void *_record, void *_ctx) } /* H5D__bt2_unfilt_decode() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_unfilt_debug + * Function: H5D__bt2_unfilt_debug * - * Purpose: Debug native form of record (non-filtered) + * Purpose: Debug native form of record (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -440,10 +449,10 @@ H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth, const void *_record, * Function: H5D__bt2_filt_encode * * Purpose: Encode native information into raw form for storing on disk - * (filtered) + * (filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -473,13 +482,13 @@ H5D__bt2_filt_encode(uint8_t *raw, const void *_record, void *_ctx) } /* H5D__bt2_filt_encode() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_filt_decode + * Function: H5D__bt2_filt_decode * - * Purpose: Decode raw disk form of record into native form - * (filtered) + * Purpose: Decode raw disk form of record into native form + * (filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -511,12 +520,12 @@ H5D__bt2_filt_decode(const uint8_t *raw, void *_record, void *_ctx) } /* H5D__bt2_filt_decode() */ /*------------------------------------------------------------------------- - * 
Function: H5D__bt2_filt_debug + * Function: H5D__bt2_filt_debug * - * Purpose: Debug native form of record (filtered) + * Purpose: Debug native form of record (filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -570,13 +579,13 @@ H5D__bt2_idx_init(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, const H5S_t } /* end H5D__bt2_idx_init() */ /*------------------------------------------------------------------------- - * Function: H5D__btree2_idx_depend + * Function: H5D__btree2_idx_depend * - * Purpose: Create flush dependency between v2 B-tree and dataset's + * Purpose: Create flush dependency between v2 B-tree and dataset's * object header. * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -629,63 +638,9 @@ H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info) } /* end H5D__btree2_idx_depend() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_open() - * - * Purpose: Opens an existing v2 B-tree. - * - * Note: This information is passively initialized from each index - * operation callback because those abstract chunk index operations - * are designed to work with the v2 B-tree chunk indices also, - * which don't require an 'open' for the data structure. + * Function: H5D__bt2_idx_create * - * Return: Success: non-negative - * Failure: negative - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) -{ - H5D_bt2_ctx_ud_t u_ctx; /* user data for creating context */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check args */ - assert(idx_info); - assert(idx_info->f); - assert(idx_info->pline); - assert(idx_info->layout); - assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.btree2.bt2); - - /* Set up the user data */ - u_ctx.f = idx_info->f; - u_ctx.ndims = idx_info->layout->ndims - 1; - u_ctx.chunk_size = idx_info->layout->size; - u_ctx.dim = idx_info->layout->dim; - - /* Open v2 B-tree for the chunk index */ - if (NULL == - (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->storage->idx_addr, &u_ctx))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset"); - - /* Check for SWMR writes to the file */ - if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) - if (H5D__btree2_idx_depend(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, - "unable to create flush dependency on object header"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__bt2_idx_open() */ - -/*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_create - * - * Purpose: Create the v2 B-tree for tracking dataset chunks + * Purpose: Create the v2 B-tree for tracking dataset chunks * * Return: SUCCEED/FAIL * @@ -758,11 +713,120 @@ H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) } /* end H5D__bt2_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_is_space_alloc + * Function: H5D__bt2_idx_open() + * + * Purpose: Opens an 
existing v2 B-tree. * - * Purpose: Query if space is allocated for index method + * Note: This information is passively initialized from each index + * operation callback because those abstract chunk index + * operations are designed to work with the v2 B-tree chunk + * indices also, which don't require an 'open' for the data + * structure. * - * Return: Non-negative on success/Negative on failure + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_bt2_ctx_ud_t u_ctx; /* user data for creating context */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + assert(idx_info); + assert(idx_info->f); + assert(idx_info->pline); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); + assert(idx_info->storage); + assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(NULL == idx_info->storage->u.btree2.bt2); + + /* Set up the user data */ + u_ctx.f = idx_info->f; + u_ctx.ndims = idx_info->layout->ndims - 1; + u_ctx.chunk_size = idx_info->layout->size; + u_ctx.dim = idx_info->layout->dim; + + /* Open v2 B-tree for the chunk index */ + if (NULL == + (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->storage->idx_addr, &u_ctx))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset"); + + /* Check for SWMR writes to the file */ + if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if (H5D__btree2_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, + "unable to create flush dependency on object header"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_close() + * + * Purpose: Closes an existing v2 B-tree. 
+ * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_close(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); + assert(idx_info->storage->u.btree2.bt2); + + if (H5B2_close(idx_info->storage->u.btree2.bt2) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree"); + idx_info->storage->u.btree2.bt2 = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_BT2_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__bt2_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: true/false * *------------------------------------------------------------------------- */ @@ -778,14 +842,14 @@ H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__bt2_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_mod_cb + * Function: H5D__bt2_mod_cb * - * Purpose: Modify record for dataset chunk when it is found in a v2 B-tree. - * This is the callback for H5B2_update() which is called in - * H5D__bt2_idx_insert(). + * Purpose: Modify record for dataset chunk when it is found in a v2 + * B-tree. This is the callback for H5B2_update() which is + * called in H5D__bt2_idx_insert(). * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -817,18 +881,21 @@ H5D__bt2_mod_cb(void *_record, void *_op_data, bool *changed) } /* end H5D__bt2_mod_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_insert + * Function: H5D__bt2_idx_insert + * + * Purpose: Insert chunk address into the indexing structure. + * A non-filtered chunk: + * Should not exist + * Allocate the chunk and pass chunk address back up + * A filtered chunk: + * If it was not found, create the chunk and pass chunk + * address back up + * If it was found but its size changed, reallocate the chunk + * and pass chunk address back up + * If it was found but its size was the same, pass chunk + * address back up * - * Purpose: Insert chunk address into the indexing structure. 
- * A non-filtered chunk: - * Should not exist - * Allocate the chunk and pass chunk address back up - * A filtered chunk: - * If it was not found, create the chunk and pass chunk address back up - * If it was found but its size changed, reallocate the chunk and pass chunk address back up - * If it was found but its size was the same, pass chunk address back up - * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -854,7 +921,7 @@ H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, assert(H5_addr_defined(udata->chunk_block.offset)); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -889,14 +956,14 @@ H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, } /* H5D__bt2_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_found_cb + * Function: H5D__bt2_found_cb * - * Purpose: Retrieve record for dataset chunk when it is found in a v2 B-tree. - * This is the callback for H5B2_find() which is called in - * H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert(). + * Purpose: Retrieve record for dataset chunk when it is found in a v2 + * B-tree. This is the callback for H5B2_find() which is called + * in H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert(). * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -911,13 +978,13 @@ H5D__bt2_found_cb(const void *nrecord, void *op_data) } /* H5D__bt2_found_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_get_addr + * Function: H5D__bt2_idx_get_addr * - * Purpose: Get the file address of a chunk if file space has been - * assigned. Save the retrieved information in the udata - * supplied. + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -944,7 +1011,7 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) assert(udata); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1003,16 +1070,59 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) } /* H5D__bt2_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_iterate_cb + * Function: H5D__bt2_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + H5D_chunk_ud_t chunk_ud; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* + * After opening a dataset that uses a v2 Btree, the root + * node will generally not be read in until an element is + * looked up for the first time. Since there isn't currently + * a good way of controlling that explicitly, perform a fake + * lookup of a chunk to cause it to be read in. + */ + chunk_ud.common.layout = idx_info->layout; + chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.scaled = scaled; + + chunk_ud.chunk_block.offset = HADDR_UNDEF; + chunk_ud.chunk_block.length = 0; + chunk_ud.filter_mask = 0; + chunk_ud.new_unfilt_chunk = false; + chunk_ud.idx_hint = UINT_MAX; + + if (H5D__bt2_idx_get_addr(idx_info, &chunk_ud) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't load v2 B-tree root node"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_iterate_cb * - * Purpose: Translate the B-tree specific chunk record into a generic + * Purpose: Translate the B-tree specific chunk record into a generic * form and make the callback to the generic chunk callback * routine. - * This is the callback for H5B2_iterate() which is called in - * H5D__bt2_idx_iterate(). + * This is the callback for H5B2_iterate() which is called in + * H5D__bt2_idx_iterate(). * - * Return: Success: Non-negative - * Failure: Negative + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -1033,12 +1143,12 @@ H5D__bt2_idx_iterate_cb(const void *_record, void *_udata) } /* H5D__bt2_idx_iterate_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_iterate + * Function: H5D__bt2_idx_iterate * - * Purpose: Iterate over the chunks in an index, making a callback + * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1062,7 +1172,7 @@ H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chu assert(chunk_udata); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1087,15 +1197,16 @@ H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chu } /* end H5D__bt2_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_remove_cb() + * Function: H5D__bt2_remove_cb() * - * Purpose: Free space for 'dataset chunk' object as v2 B-tree - * is being deleted or v2 B-tree node is removed. - * This is the callback for H5B2_remove() and H5B2_delete() which - * which are called in H5D__bt2_idx_remove() and H5D__bt2_idx_delete(). 
+ * Purpose: Free space for 'dataset chunk' object as v2 B-tree + * is being deleted or v2 B-tree node is removed. + * This is the callback for H5B2_remove() and H5B2_delete() + * which are called in H5D__bt2_idx_remove() and + * H5D__bt2_idx_delete(). * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1121,11 +1232,11 @@ H5D__bt2_remove_cb(const void *_record, void *_udata) } /* H5D__bt2_remove_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_remove + * Function: H5D__bt2_idx_remove * - * Purpose: Remove chunk from index. + * Purpose: Remove chunk from index. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1149,7 +1260,7 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u assert(udata); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1180,13 +1291,13 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u } /* H5D__bt2_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_delete + * Function: H5D__bt2_idx_delete * - * Purpose: Delete index and raw data storage for entire dataset + * Purpose: Delete index and raw data storage for entire dataset * (i.e. 
all chunks) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1233,11 +1344,11 @@ H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__bt2_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_copy_setup + * Function: H5D__bt2_idx_copy_setup * - * Purpose: Set up any necessary information for copying chunks + * Purpose: Set up any necessary information for copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1264,7 +1375,7 @@ H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_id assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); /* Check if the source v2 B-tree is open yet */ - if (NULL == idx_info_src->storage->u.btree2.bt2) + if (!H5D_BT2_IDX_IS_OPEN(idx_info_src)) if (H5D__bt2_idx_open(idx_info_src) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1284,11 +1395,11 @@ H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_id } /* end H5D__bt2_idx_copy_setup() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_copy_shutdown + * Function: H5D__bt2_idx_copy_shutdown * - * Purpose: Shutdown any information from copying chunks + * Purpose: Shutdown any information from copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1324,8 +1435,8 @@ H5D__bt2_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, H5O_storage_chunk_t * * Purpose: Retrieve the amount of index storage for chunked dataset * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1355,23 +1466,23 @@ H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) /* Get v2 B-tree size for indexing chunked dataset */ if (H5B2_size(bt2_cdset, index_size) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't retrieve v2 B-tree storage info for chunked dataset"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, + "can't retrieve v2 B-tree storage info for chunked dataset"); done: /* Close v2 B-tree index */ - if (bt2_cdset && H5B2_close(bt2_cdset) < 0) - HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for tracking chunked dataset"); - idx_info->storage->u.btree2.bt2 = NULL; + if (H5D__bt2_idx_close(idx_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for tracking chunked dataset"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__bt2_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_reset + * Function: H5D__bt2_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1392,11 +1503,11 @@ H5D__bt2_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__bt2_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_dump + * Function: H5D__bt2_idx_dump * - * Purpose: Dump indexing information to a stream. + * Purpose: Dump indexing information to a stream. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1415,11 +1526,11 @@ H5D__bt2_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) } /* end H5D__bt2_idx_dump() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_dest + * Function: H5D__bt2_idx_dest * - * Purpose: Release indexing information in memory. + * Purpose: Release indexing information in memory. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1436,16 +1547,14 @@ H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->storage); /* Check if the v2-btree is open */ - if (idx_info->storage->u.btree2.bt2) { - + if (H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in bt2 if needed */ if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Close v2 B-tree */ - if (H5B2_close(idx_info->storage->u.btree2.bt2) < 0) + if (H5D__bt2_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree"); - idx_info->storage->u.btree2.bt2 = NULL; } /* end if */ done: diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 9f4bd90b68a..41d774d0d3e 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -1124,18 +1124,33 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) if (H5F_SHARED_HAS_FEATURE(io_info->f_sh, H5FD_FEAT_HAS_MPI) && H5F_shared_get_coll_metadata_reads(io_info->f_sh) && H5D__chunk_is_space_alloc(&dataset->shared->layout.storage)) { - H5D_chunk_ud_t udata; - hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + H5O_storage_chunk_t *sc = &(dataset->shared->layout.storage.u.chunk); + H5D_chk_idx_info_t idx_info; + bool index_is_open; + + idx_info.f = dataset->oloc.file; + idx_info.pline = &dataset->shared->dcpl_cache.pline; + idx_info.layout = &dataset->shared->layout.u.chunk; + idx_info.storage = sc; + + assert(sc && sc->ops && sc->ops->is_open); + if (sc->ops->is_open(&idx_info, &index_is_open) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to check if dataset chunk index is open"); + + if (!index_is_open) { + assert(sc->ops->open); + if (sc->ops->open(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to open dataset chunk index"); + } /* - * TODO: Until the dataset chunk index callback structure has - * callbacks for checking if an index is opened and also for - * directly opening the index, the following fake chunk lookup - * serves the purpose of forcing a chunk index open operation - * on all ranks + * Load any other chunk index metadata that we can, + * such as fixed array data blocks, while we know all + * MPI 
ranks will do so with collective metadata reads + * enabled */ - if (H5D__chunk_lookup(dataset, scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to collectively open dataset chunk index"); + if (sc->ops->load_metadata && sc->ops->load_metadata(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load additional chunk index metadata"); } #endif @@ -3827,15 +3842,29 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5D_chunk_ud_t *udat idx_info.storage = sc; #ifdef H5_HAVE_PARALLEL - /* Disable collective metadata read for chunk indexes as it is - * highly unlikely that users would read the same chunks from all - * processes. - */ if (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) { - md_reads_file_flag = H5P_FORCE_FALSE; - md_reads_context_flag = false; - H5F_set_coll_metadata_reads(idx_info.f, &md_reads_file_flag, &md_reads_context_flag); - restore_md_reads_state = true; + /* Disable collective metadata read for chunk indexes as it is + * highly unlikely that users would read the same chunks from all + * processes. + */ + if (H5F_get_coll_metadata_reads(idx_info.f)) { +#ifndef NDEBUG + bool index_is_open; + + /* + * The dataset's chunk index should be open at this point. + * Otherwise, we will end up reading it in independently, + * which may not be desired. + */ + sc->ops->is_open(&idx_info, &index_is_open); + assert(index_is_open); +#endif + + md_reads_file_flag = H5P_FORCE_FALSE; + md_reads_context_flag = false; + H5F_set_coll_metadata_reads(idx_info.f, &md_reads_file_flag, &md_reads_context_flag); + restore_md_reads_state = true; + } } #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Dearray.c b/src/H5Dearray.c index c713b6f18bb..965eaacaca3 100644 --- a/src/H5Dearray.c +++ b/src/H5Dearray.c @@ -26,19 +26,21 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5EAprivate.h" /* Extensible arrays */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5EAprivate.h" /* Extensible arrays */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5VMprivate.h" /* Vector functions */ +#include "H5MFprivate.h" /* File space management */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5VMprivate.h" /* Vector functions */ /****************/ /* Local Macros */ /****************/ +#define H5D_EARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.earray.ea) + /* Value to fill unset array elements with */ #define H5D_EARRAY_FILL HADDR_UNDEF #define H5D_EARRAY_FILT_FILL \ @@ -106,10 +108,14 @@ static herr_t H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize static herr_t H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__earray_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t 
H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__earray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__earray_idx_resize(H5O_layout_chunk_t *layout); static int H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); @@ -125,7 +131,6 @@ static herr_t H5D__earray_idx_dump(const H5O_storage_chunk_t *storage, FILE *str static herr_t H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info); /* Generic extensible array routines */ -static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info); /*********************/ @@ -137,9 +142,13 @@ const H5D_chunk_ops_t H5D_COPS_EARRAY[1] = {{ true, /* Extensible array indices support SWMR access */ H5D__earray_idx_init, /* init */ H5D__earray_idx_create, /* create */ + H5D__earray_idx_open, /* open */ + H5D__earray_idx_close, /* close */ + H5D__earray_idx_is_open, /* is_open */ H5D__earray_idx_is_space_alloc, /* is_space_alloc */ H5D__earray_idx_insert, /* insert */ H5D__earray_idx_get_addr, /* get_addr */ + H5D__earray_idx_load_metadata, /* load_metadata */ H5D__earray_idx_resize, /* resize */ H5D__earray_idx_iterate, /* iterate */ H5D__earray_idx_remove, /* remove */ @@ -270,10 +279,10 @@ H5D__earray_dst_context(void *_ctx) /*------------------------------------------------------------------------- * Function: H5D__earray_fill * - * Purpose: Fill "missing elements" in block of elements + * Purpose: Fill "missing elements" in block of elements * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -705,59 +714,6 @@ H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__earray_idx_depend() */ -/*------------------------------------------------------------------------- - * Function: H5D__earray_idx_open - * - * Purpose: Opens an existing extensible array. - * - * Note: This information is passively initialized from each index - * operation callback because those abstract chunk index operations - * are designed to work with the v1 B-tree chunk indices also, - * which don't require an 'open' for the data structure. 
- * - * Return: Success: non-negative - * Failure: negative - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) -{ - H5D_earray_ctx_ud_t udata; /* User data for extensible array open call */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check args */ - assert(idx_info); - assert(idx_info->f); - assert(idx_info->pline); - assert(idx_info->layout); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.earray.ea); - - /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; - - /* Open the extensible array for the chunk index */ - if (NULL == - (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array"); - - /* Check for SWMR writes to the file */ - if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) - if (H5D__earray_idx_depend(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, - "unable to create flush dependency on object header"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__earray_idx_open() */ - /*------------------------------------------------------------------------- * Function: H5D__earray_idx_init * @@ -905,12 +861,120 @@ H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__earray_idx_create() */ +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_open + * + * Purpose: Opens an existing extensible array. + * + * Note: This information is passively initialized from each index + * operation callback because those abstract chunk index + * operations are designed to work with the v1 B-tree chunk + * indices also, which don't require an 'open' for the data + * structure. 
+ * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_earray_ctx_ud_t udata; /* User data for extensible array open call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + assert(idx_info); + assert(idx_info->f); + assert(idx_info->pline); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(NULL == idx_info->storage->u.earray.ea); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Open the extensible array for the chunk index */ + if (NULL == + (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array"); + + /* Check for SWMR writes to the file */ + if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if (H5D__earray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, + "unable to create flush dependency on object header"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_close + * + * Purpose: Closes an existing extensible array. + * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_close(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(idx_info->storage->u.earray.ea); + + if (H5EA_close(idx_info->storage->u.earray.ea) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); + idx_info->storage->u.earray.ea = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_EARRAY_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_idx_is_open() */ + /*------------------------------------------------------------------------- * Function: H5D__earray_idx_is_space_alloc * * Purpose: Query if space is allocated for index method * - * Return: Non-negative on success/Negative on failure + * Return: true/false * *------------------------------------------------------------------------- */ @@ -953,7 +1017,7 @@ H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if 
(!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1021,7 +1085,7 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1086,6 +1150,51 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda FUNC_LEAVE_NOAPI(ret_value) } /* H5D__earray_idx_get_addr() */ +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) +{ + H5D_chunk_ud_t chunk_ud; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* + * After opening a dataset that uses an extensible array, + * the extensible array header index block will generally + * not be read in until an element is looked up for the + * first time. Since there isn't currently a good way of + * controlling that explicitly, perform a fake lookup of + * a chunk to cause it to be read in or created if it + * doesn't exist yet. + */ + chunk_ud.common.layout = idx_info->layout; + chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.scaled = scaled; + + chunk_ud.chunk_block.offset = HADDR_UNDEF; + chunk_ud.chunk_block.length = 0; + chunk_ud.filter_mask = 0; + chunk_ud.new_unfilt_chunk = false; + chunk_ud.idx_hint = UINT_MAX; + + if (H5D__earray_idx_get_addr(idx_info, &chunk_ud) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't load extensible array header index block"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__earray_idx_load_metadata() */ + /*------------------------------------------------------------------------- * Function: H5D__earray_idx_resize * @@ -1195,10 +1304,6 @@ H5D__earray_idx_iterate_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void * * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Note: This implementation is slow, particularly for sparse - * extensible arrays, replace it with call to H5EA_iterate() - * when that's available. 
- * * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- @@ -1223,10 +1328,10 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(chunk_udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, H5_ITER_ERROR, "can't open extensible array"); } else /* Patch the top level file pointer contained in ea if needed */ H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); @@ -1236,7 +1341,7 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t /* Get the extensible array statistics */ if (H5EA_get_stats(ea, &ea_stat) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array statistics"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, H5_ITER_ERROR, "can't query extensible array statistics"); if (ea_stat.stored.max_idx_set > 0) { H5D_earray_it_ud_t udata; /* User data for iteration callback */ @@ -1291,7 +1396,7 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1444,9 +1549,8 @@ H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses"); /* Close extensible array */ - if (H5EA_close(idx_info->storage->u.earray.ea) < 0) + if (H5D__earray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); - idx_info->storage->u.earray.ea = NULL; /* Set up the context user data */ ctx_udata.f = idx_info->f; @@ -1494,7 +1598,7 @@ H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); /* Check if the source extensible array is open yet */ - if (NULL == idx_info_src->storage->u.earray.ea) + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info_src)) /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info_src) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1593,9 +1697,8 @@ H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) done: if (idx_info->storage->u.earray.ea) { - if (H5EA_close(idx_info->storage->u.earray.ea) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); - idx_info->storage->u.earray.ea = NULL; + if (H5D__earray_idx_close(idx_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1673,16 +1776,14 @@ H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->storage); /* Check if the extensible array is open */ - if (idx_info->storage->u.earray.ea) { - + if (H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in ea if needed */ if (H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, 
FAIL, "can't patch earray file pointer");
 
         /* Close extensible array */
-        if (H5EA_close(idx_info->storage->u.earray.ea) < 0)
+        if (H5D__earray_idx_close(idx_info) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array");
-        idx_info->storage->u.earray.ea = NULL;
     } /* end if */
 
 done:
diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c
index 450d466755c..8d06de47b02 100644
--- a/src/H5Dfarray.c
+++ b/src/H5Dfarray.c
@@ -37,6 +37,8 @@
 /* Local Macros */
 /****************/
 
+#define H5D_FARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.farray.fa)
+
 /* Value to fill unset array elements with */
 #define H5D_FARRAY_FILL HADDR_UNDEF
 #define H5D_FARRAY_FILT_FILL \
@@ -105,10 +107,14 @@ static herr_t H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize
 static herr_t H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space,
                                    haddr_t dset_ohdr_addr);
 static herr_t H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__farray_idx_close(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__farray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open);
 static bool   H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage);
 static herr_t H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata,
                                      const H5D_t *dset);
 static herr_t H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata);
+static herr_t H5D__farray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info);
 static int    H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb,
                                       void *chunk_udata);
 static herr_t H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata);
@@ -123,7 +129,6 @@ static herr_t H5D__farray_idx_dump(const H5O_storage_chunk_t *storage, FILE *str
 static herr_t H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info);
 
 /* Generic fixed array routines */
-static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info);
 static herr_t H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info);
 
 /*********************/
@@ -135,9 +140,13 @@ const H5D_chunk_ops_t H5D_COPS_FARRAY[1] = {{
     true,                           /* Fixed array indices support SWMR access */
     H5D__farray_idx_init,           /* init */
     H5D__farray_idx_create,         /* create */
+    H5D__farray_idx_open,           /* open */
+    H5D__farray_idx_close,          /* close */
+    H5D__farray_idx_is_open,        /* is_open */
     H5D__farray_idx_is_space_alloc, /* is_space_alloc */
     H5D__farray_idx_insert,         /* insert */
     H5D__farray_idx_get_addr,       /* get_addr */
+    H5D__farray_idx_load_metadata,  /* load_metadata */
     NULL,                           /* resize */
     H5D__farray_idx_iterate,        /* iterate */
     H5D__farray_idx_remove,         /* remove */
@@ -726,55 +735,6 @@ H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5D__farray_idx_init() */
 
-/*-------------------------------------------------------------------------
- * Function: H5D__farray_idx_open
- *
- * Purpose: Opens an existing fixed array and initializes
- * the layout struct with information about the storage.
- * - * Return: Success: non-negative - * Failure: negative - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) -{ - H5D_farray_ctx_ud_t udata; /* User data for fixed array open call */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check args */ - assert(idx_info); - assert(idx_info->f); - assert(idx_info->pline); - assert(idx_info->layout); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.farray.fa); - - /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; - - /* Open the fixed array for the chunk index */ - if (NULL == - (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array"); - - /* Check for SWMR writes to the file */ - if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) - if (H5D__farray_idx_depend(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, - "unable to create flush dependency on object header"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__farray_idx_open() */ - /*------------------------------------------------------------------------- * Function: H5D__farray_idx_create * @@ -853,12 +813,115 @@ H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__farray_idx_create() */ +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_open + * + * Purpose: Opens an existing fixed array and initializes + * the layout struct with information about the storage. + * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_farray_ctx_ud_t udata; /* User data for fixed array open call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + assert(idx_info); + assert(idx_info->f); + assert(idx_info->pline); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(NULL == idx_info->storage->u.farray.fa); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Open the fixed array for the chunk index */ + if (NULL == + (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array"); + + /* Check for SWMR writes to the file */ + if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if (H5D__farray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, + "unable to create flush dependency on object header"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_close + * + * Purpose: Closes an existing fixed array. 
+ * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_close(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(idx_info->storage->u.farray.fa); + + if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); + idx_info->storage->u.farray.fa = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_FARRAY_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_idx_is_open() */ + /*------------------------------------------------------------------------- * Function: H5D__farray_idx_is_space_alloc * * Purpose: Query if space is allocated for index method * - * Return: Non-negative on success/Negative on failure + * Return: true/false * *------------------------------------------------------------------------- */ @@ -901,7 +964,7 @@ H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -969,7 +1032,7 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -1016,6 +1079,50 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda FUNC_LEAVE_NOAPI(ret_value) } /* H5D__farray_idx_get_addr() */ +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) +{ + H5D_chunk_ud_t chunk_ud; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* + * After opening a dataset that uses a fixed array, the + * fixed array data block will generally not be read in + * until an element is looked up for the first time. 
Since + * there isn't currently a good way of controlling that + * explicitly, perform a fake lookup of a chunk to cause + * it to be read in. + */ + chunk_ud.common.layout = idx_info->layout; + chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.scaled = scaled; + + chunk_ud.chunk_block.offset = HADDR_UNDEF; + chunk_ud.chunk_block.length = 0; + chunk_ud.filter_mask = 0; + chunk_ud.new_unfilt_chunk = false; + chunk_ud.idx_hint = UINT_MAX; + + if (H5D__farray_idx_get_addr(idx_info, &chunk_ud) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't load fixed array data block"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__farray_idx_load_metadata() */ + /*------------------------------------------------------------------------- * Function: H5D__farray_idx_iterate_cb * @@ -1102,7 +1209,7 @@ H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(chunk_udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -1171,7 +1278,7 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -1302,9 +1409,8 @@ H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses"); /* Close fixed array */ - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + if (H5D__farray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; /* Set up the user data */ ctx_udata.f = idx_info->f; @@ -1352,10 +1458,11 @@ H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); /* Check if the source fixed array is open yet */ - if (NULL == idx_info_src->storage->u.farray.fa) + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info_src)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info_src) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); + } /* Set copied metadata tag */ H5_BEGIN_TAG(H5AC__COPIED_TAG) @@ -1450,9 +1557,8 @@ H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) done: if (idx_info->storage->u.farray.fa) { - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; + if (H5D__farray_idx_close(idx_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1528,16 +1634,14 @@ H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->storage); /* Check if the fixed array is open */ - if (idx_info->storage->u.farray.fa) { - + if (H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in fa if needed */ if (H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch fixed array file pointer"); /* Close 
fixed array */ - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + if (H5D__farray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; } /* end if */ done: diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 0ef6542fcdb..16243fadf92 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -3024,6 +3024,26 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as * metadata reads are enabled. */ if (H5F_get_coll_metadata_reads(di->dset->oloc.file)) { +#ifndef NDEBUG + { + H5D_chk_idx_info_t idx_info; + bool index_is_open; + + idx_info.f = di->dset->oloc.file; + idx_info.pline = &di->dset->shared->dcpl_cache.pline; + idx_info.layout = &di->dset->shared->layout.u.chunk; + idx_info.storage = &di->dset->shared->layout.storage.u.chunk; + + /* + * The dataset's chunk index should be open at this point. + * Otherwise, we will end up reading it in independently, + * which may not be desired. + */ + idx_info.storage->ops->is_open(&idx_info, &index_is_open); + assert(index_is_open); + } +#endif + md_reads_file_flag = H5P_FORCE_FALSE; md_reads_context_flag = false; H5F_set_coll_metadata_reads(di->dset->oloc.file, &md_reads_file_flag, &md_reads_context_flag); @@ -3446,26 +3466,6 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const chunk_node = H5SL_next(chunk_node); } } - else if (H5F_get_coll_metadata_reads(di[dset_idx].dset->oloc.file)) { - hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; - - /* - * If this rank has no selection in the dataset and collective - * metadata reads are enabled, do a fake lookup of a chunk to - * ensure that this rank has the chunk index opened. Otherwise, - * only the ranks that had a selection will have opened the - * chunk index and they will have done so independently. Therefore, - * when ranks with no selection participate in later collective - * metadata reads, they will try to open the chunk index collectively - * and issues will occur since other ranks won't participate. - * - * In the future, we should consider having a chunk index "open" - * callback that can be used to ensure collectivity between ranks - * in a more natural way, but this hack should suffice for now. - */ - if (H5D__chunk_lookup(di[dset_idx].dset, scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address"); - } /* Reset metadata tagging */ H5AC_tag(prev_tag, NULL); diff --git a/src/H5Dnone.c b/src/H5Dnone.c index 472a2214dc7..d4eb9188840 100644 --- a/src/H5Dnone.c +++ b/src/H5Dnone.c @@ -14,9 +14,9 @@ * Purpose: Implicit (Non Index) chunked I/O functions. * * This is used when the dataset is: - * - extendible but with fixed max. dims - * - with early allocation - * - without filter + * - extendible but with fixed max. dims + * - with early allocation + * - without filter * * The chunk coordinate is mapped into the actual disk addresses * for the chunk without indexing. 
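A minimal sketch of the implicit mapping described in the H5Dnone.c header above, assuming early allocation, no filters, and a fixed chunk size (the helper name below is hypothetical, not an HDF5 API): a chunk's file address comes straight from its linear index, which is roughly what H5D__none_idx_get_addr derives from idx_info->storage->idx_addr and idx_info->layout->size.

#include <stdint.h>

/* Compute the file address of a chunk under the implicit ("none") index
 * scheme: all chunks are allocated up front as one contiguous block that
 * starts at base_addr, and every chunk occupies exactly chunk_size bytes,
 * so no lookup structure is needed. Hypothetical helper, for illustration. */
static uint64_t
implicit_chunk_addr(uint64_t base_addr, uint64_t linear_chunk_idx, uint64_t chunk_size)
{
    return base_addr + (linear_chunk_idx * chunk_size);
}

For example, with a 1 MiB chunk size the chunk at linear index 7 would live at base_addr + 7 * 1048576.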
@@ -31,12 +31,12 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5MFprivate.h" /* File space management */ -#include "H5VMprivate.h" /* Vector functions */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector functions */ /****************/ /* Local Macros */ @@ -52,8 +52,12 @@ /* Non Index chunking I/O ops */ static herr_t H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__none_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__none_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__none_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__none_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__none_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__none_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -73,9 +77,13 @@ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{ false, /* Non-indexed chunking don't current support SWMR access */ NULL, /* init */ H5D__none_idx_create, /* create */ + H5D__none_idx_open, /* open */ + H5D__none_idx_close, /* close */ + H5D__none_idx_is_open, /* is_open */ H5D__none_idx_is_space_alloc, /* is_space_alloc */ NULL, /* insert */ H5D__none_idx_get_addr, /* get_addr */ + H5D__none_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__none_idx_iterate, /* iterate */ H5D__none_idx_remove, /* remove */ @@ -97,12 +105,12 @@ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{ /*******************/ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_create + * Function: H5D__none_idx_create * - * Purpose: Allocate memory for the maximum # of chunks in the dataset. + * Purpose: Allocate memory for the maximum # of chunks in the dataset. * - * Return: Non-negative on success - * Negative on failure. + * Return: Non-negative on success + * Negative on failure. * *------------------------------------------------------------------------- */ @@ -141,11 +149,73 @@ H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info) } /* end H5D__none_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_is_space_alloc + * Function: H5D__none_idx_open * - * Purpose: Query if space for the dataset chunks is allocated + * Purpose: Opens an existing "none" index. Currently a no-op. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_open(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__none_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_close + * + * Purpose: Closes an existing "none" index. Currently a no-op. 
+ * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_close(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__none_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_is_open(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_NONE == idx_info->storage->idx_type); + assert(is_open); + + *is_open = true; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__none_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_is_space_alloc + * + * Purpose: Query if space for the dataset chunks is allocated + * + * Return: true/false * *------------------------------------------------------------------------- */ @@ -161,12 +231,12 @@ H5D__none_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__none_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_get_addr + * Function: H5D__none_idx_get_addr * - * Purpose: Get the file address of a chunk. - * Save the retrieved information in the udata supplied. + * Purpose: Get the file address of a chunk. + * Save the retrieved information in the udata supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -200,12 +270,32 @@ H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata } /* H5D__none_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_iterate + * Function: H5D__none_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. Currently a no-op. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__none_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_iterate * - * Purpose: Iterate over the chunks in an index, making a callback + * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -275,13 +365,13 @@ H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t ch } /* end H5D__none_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_remove + * Function: H5D__none_idx_remove * - * Purpose: Remove chunk from index. + * Purpose: Remove chunk from index. 
* - * Note: Chunks can't be removed (or added) to datasets with this - * form of index - all the space for all the chunks is always - * allocated in the file. + * Note: Chunks can't be removed (or added) to datasets with this + * form of index - all the space for all the chunks is always + * allocated in the file. * * Return: Non-negative on success/Negative on failure * @@ -299,12 +389,12 @@ H5D__none_idx_remove(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, } /* H5D__none_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_delete + * Function: H5D__none_idx_delete * - * Purpose: Delete raw data storage for entire dataset (i.e. all chunks) + * Purpose: Delete raw data storage for entire dataset (i.e. all chunks) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -337,11 +427,11 @@ H5D__none_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__none_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_copy_setup + * Function: H5D__none_idx_copy_setup * - * Purpose: Set up any necessary information for copying chunks + * Purpose: Set up any necessary information for copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -407,11 +497,11 @@ H5D__none_idx_size(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, hsize_t *i } /* end H5D__none_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_reset + * Function: H5D__none_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -431,11 +521,11 @@ H5D__none_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__none_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_dump + * Function: H5D__none_idx_dump * - * Purpose: Dump + * Purpose: Dump * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 82fec0ea1ff..a3695ae8544 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -393,10 +393,14 @@ typedef int (*H5D_chunk_cb_func_t)(const H5D_chunk_rec_t *chunk_rec, void *udata typedef herr_t (*H5D_chunk_init_func_t)(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); typedef herr_t (*H5D_chunk_create_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_open_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_close_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_is_open_func_t)(const H5D_chk_idx_info_t *idx_info, bool *is_open); typedef bool (*H5D_chunk_is_space_alloc_func_t)(const H5O_storage_chunk_t *storage); typedef herr_t (*H5D_chunk_insert_func_t)(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); typedef herr_t (*H5D_chunk_get_addr_func_t)(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +typedef herr_t (*H5D_chunk_load_metadata_func_t)(const H5D_chk_idx_info_t *idx_info); typedef herr_t (*H5D_chunk_resize_func_t)(H5O_layout_chunk_t *layout); typedef int (*H5D_chunk_iterate_func_t)(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); @@ -413,13 +417,18 @@ typedef herr_t (*H5D_chunk_dest_func_t)(const H5D_chk_idx_info_t *idx_info); /* Typedef for grouping chunk I/O routines */ typedef struct H5D_chunk_ops_t { - bool can_swim; /* Flag to indicate that the index supports SWMR access */ - H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */ - H5D_chunk_create_func_t create; /* Routine to create chunk index */ + bool can_swim; /* Flag to indicate that the index supports SWMR access */ + H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */ + H5D_chunk_create_func_t create; /* Routine to create chunk index */ + H5D_chunk_open_func_t open; /* Routine to open chunk index */ + H5D_chunk_close_func_t close; /* Routine to close chunk index */ + H5D_chunk_is_open_func_t is_open; /* Query routine to determine if index is open or not */ H5D_chunk_is_space_alloc_func_t - is_space_alloc; /* Query routine to determine if storage/index is allocated */ - H5D_chunk_insert_func_t insert; /* Routine to insert a chunk into an index */ - H5D_chunk_get_addr_func_t get_addr; /* Routine to retrieve address of chunk in file */ + is_space_alloc; /* Query routine to determine if storage/index is allocated */ + H5D_chunk_insert_func_t insert; /* Routine to insert a chunk into an index */ + H5D_chunk_get_addr_func_t get_addr; /* Routine to retrieve address of chunk in file */ + H5D_chunk_load_metadata_func_t + load_metadata; /* Routine to load additional chunk index metadata, such as fixed array data blocks */ H5D_chunk_resize_func_t resize; /* Routine to update 
chunk index info after resizing dataset */ H5D_chunk_iterate_func_t iterate; /* Routine to iterate over chunks */ H5D_chunk_remove_func_t remove; /* Routine to remove a chunk from an index */ diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c index 9cb18d35278..dd9f2353d7b 100644 --- a/src/H5Dsingle.c +++ b/src/H5Dsingle.c @@ -27,12 +27,12 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5MFprivate.h" /* File space management */ -#include "H5VMprivate.h" /* Vector functions */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector functions */ /****************/ /* Local Macros */ @@ -50,10 +50,14 @@ static herr_t H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__single_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__single_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__single_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__single_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__single_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__single_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -73,9 +77,13 @@ const H5D_chunk_ops_t H5D_COPS_SINGLE[1] = {{ false, /* Single Chunk indexing doesn't current support SWMR access */ H5D__single_idx_init, /* init */ H5D__single_idx_create, /* create */ + H5D__single_idx_open, /* open */ + H5D__single_idx_close, /* close */ + H5D__single_idx_is_open, /* is_open */ H5D__single_idx_is_space_alloc, /* is_space_alloc */ H5D__single_idx_insert, /* insert */ H5D__single_idx_get_addr, /* get_addr */ + H5D__single_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__single_idx_iterate, /* iterate */ H5D__single_idx_remove, /* remove */ @@ -133,12 +141,12 @@ H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU } /* end H5D__single_idx_init() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_create + * Function: H5D__single_idx_create * - * Purpose: Set up Single Chunk Index: filtered or non-filtered + * Purpose: Set up Single Chunk Index: filtered or non-filtered * - * Return: Non-negative on success - * Negative on failure. + * Return: Non-negative on success + * Negative on failure. 
* *------------------------------------------------------------------------- */ @@ -166,11 +174,73 @@ H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info) } /* end H5D__single_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_is_space_alloc + * Function: H5D__single_idx_open * - * Purpose: Query if space is allocated for the single chunk + * Purpose: Opens an existing "single" index. Currently a no-op. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_open(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__single_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_close + * + * Purpose: Closes an existing "single" index. Currently a no-op. + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_close(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__single_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_is_open(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_SINGLE == idx_info->storage->idx_type); + assert(is_open); + + *is_open = true; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__single_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_is_space_alloc + * + * Purpose: Query if space is allocated for the single chunk + * + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -186,11 +256,11 @@ H5D__single_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__single_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_insert + * Function: H5D__single_idx_insert * - * Purpose: Allocate space for the single chunk + * Purpose: Allocate space for the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -231,12 +301,12 @@ H5D__single_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata } /* H5D__single_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_get_addr + * Function: H5D__single_idx_get_addr * - * Purpose: Get the file address of a chunk. - * Save the retrieved information in the udata supplied. + * Purpose: Get the file address of a chunk. + * Save the retrieved information in the udata supplied. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -271,11 +341,31 @@ H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda } /* H5D__single_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_iterate + * Function: H5D__single_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. Currently a no-op. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__single_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_iterate * - * Purpose: Make callback for the single chunk + * Purpose: Make callback for the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -318,11 +408,11 @@ H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t } /* end H5D__single_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_remove + * Function: H5D__single_idx_remove * - * Purpose: Remove the single chunk + * Purpose: Remove the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -357,12 +447,13 @@ H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t } /* H5D__single_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_delete + * Function: H5D__single_idx_delete * - * Purpose: Delete raw data storage for entire dataset (i.e. the only chunk) + * Purpose: Delete raw data storage for entire dataset (i.e. the only + * chunk) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -389,11 +480,12 @@ H5D__single_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__single_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_copy_setup + * Function: H5D__single_idx_copy_setup * - * Purpose: Set up any necessary information for copying the single chunk + * Purpose: Set up any necessary information for copying the single + * chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -457,11 +549,11 @@ H5D__single_idx_size(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, hsize_t } /* end H5D__single_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_reset + * Function: H5D__single_idx_reset * - * Purpose: Reset indexing information. 
+ * Purpose: Reset indexing information. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -481,11 +573,11 @@ H5D__single_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__single_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_dump + * Function: H5D__single_idx_dump * - * Purpose: Dump the address of the single chunk + * Purpose: Dump the address of the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ From 1900cc63eb14a240ce988a0cd99d1e6babd528f1 Mon Sep 17 00:00:00 2001 From: Neil Fortner Date: Mon, 23 Oct 2023 21:08:48 -0500 Subject: [PATCH 056/101] Fix failure in t_select_io_dset when run with more than 10 ranks (#3758) --- testpar/t_select_io_dset.c | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c index 2be2b407236..9d3f1205051 100644 --- a/testpar/t_select_io_dset.c +++ b/testpar/t_select_io_dset.c @@ -221,6 +221,26 @@ check_actual_selection_io_mode(hid_t dxpl, uint32_t sel_io_mode_expected) } } +/* + * Helper routine to check actual selection I/O mode on a dxpl + */ +static void +check_actual_selection_io_mode_either(hid_t dxpl, uint32_t sel_io_mode_expected1, + uint32_t sel_io_mode_expected2) +{ + uint32_t actual_sel_io_mode; + + if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0) + P_TEST_ERROR; + if (actual_sel_io_mode != sel_io_mode_expected1 && actual_sel_io_mode != sel_io_mode_expected2) { + if (MAINPROCESS) + printf("\n Failed: Incorrect selection I/O mode (expected/actual) %u or %u : %u", + (unsigned)sel_io_mode_expected1, (unsigned)sel_io_mode_expected2, + (unsigned)actual_sel_io_mode); + P_TEST_ERROR; + } +} + /* * Case 1: single dataset read/write, no type conversion (null case) */ @@ -327,8 +347,14 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; testing_check_io_mode(dxpl, exp_io_mode); - if (chunked && !dtrans) - check_actual_selection_io_mode(dxpl, H5D_VECTOR_IO); + if (chunked && !dtrans) { + /* If there are more ranks than chunks, then some ranks will not perform vector I/O due to how the + * parallel compression code redistributes data */ + if ((hsize_t)mpi_size > (dims[0] / cdims[0])) + check_actual_selection_io_mode_either(dxpl, H5D_VECTOR_IO, 0); + else + check_actual_selection_io_mode(dxpl, H5D_VECTOR_IO); + } else check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); From ea1714b3035b9de6cac885508c9f4d882c817b3f Mon Sep 17 00:00:00 2001 From: Glenn Song <43005495+glennsong09@users.noreply.github.com> Date: Tue, 24 Oct 2023 12:51:55 -0500 Subject: [PATCH 057/101] Fix H5Pset_evict_on_close failing regardless of actual parallel use (#3761) Allow H5Pset_evict_on_close to be called regardless of whether a parallel build of HDF5 is being used Fail during file opens if H5Pset_evict_on_close has been set to true on the given File Access Property List and the size of the MPI communicator being used is greater than 1 --- src/H5Fint.c | 18 ++++++++- src/H5Pfapl.c | 7 +--- test/evict_on_close.c | 92 ------------------------------------------- testpar/t_file.c | 59 +++++++++++++++++++++++++++ testpar/testphdf5.c | 3 ++ testpar/testphdf5.h | 1 + 6 files changed, 80 insertions(+), 100 deletions(-) diff --git a/src/H5Fint.c b/src/H5Fint.c index 014f619d8a9..4093b4b7c45 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -1968,6 +1968,22 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get minimum raw data fraction of page buffer"); } /* end if */ + /* Get the evict on close setting */ + if (H5P_get(a_plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get evict on close value"); + +#ifdef H5_HAVE_PARALLEL + /* Check for evict on close in parallel (currently unsupported) */ + assert(file->shared); + if (H5F_SHARED_HAS_FEATURE(file->shared, H5FD_FEAT_HAS_MPI)) { + int mpi_size = H5F_shared_mpi_get_size(file->shared); + + if ((mpi_size > 1) && evict_on_close) + HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, NULL, + "evict on close is currently not supported in parallel HDF5"); + } +#endif + /* * Read or write the file superblock, depending on whether the file is * empty or not. @@ -2046,8 +2062,6 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) * or later, verify that the access property list value matches the value * in shared file structure. 
*/ - if (H5P_get(a_plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get evict on close value"); if (shared->nrefs == 1) shared->evict_on_close = evict_on_close; else if (shared->nrefs > 1) { diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index dc122af9393..e7c1fb3acb8 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -4848,7 +4848,7 @@ H5P__facc_mdc_log_location_close(const char H5_ATTR_UNUSED *name, size_t H5_ATTR *------------------------------------------------------------------------- */ herr_t -H5Pset_evict_on_close(hid_t fapl_id, hbool_t H5_ATTR_PARALLEL_UNUSED evict_on_close) +H5Pset_evict_on_close(hid_t fapl_id, hbool_t evict_on_close) { H5P_genplist_t *plist; /* property list pointer */ herr_t ret_value = SUCCEED; /* return value */ @@ -4864,14 +4864,9 @@ H5Pset_evict_on_close(hid_t fapl_id, hbool_t H5_ATTR_PARALLEL_UNUSED evict_on_cl if (NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id))) HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for ID"); -#ifndef H5_HAVE_PARALLEL /* Set value */ if (H5P_set(plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set evict on close property"); -#else - HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, - "evict on close is currently not supported in parallel HDF5"); -#endif /* H5_HAVE_PARALLEL */ done: FUNC_LEAVE_API(ret_value) diff --git a/test/evict_on_close.c b/test/evict_on_close.c index 9ca7f9f9cf3..db2a96282ef 100644 --- a/test/evict_on_close.c +++ b/test/evict_on_close.c @@ -32,12 +32,6 @@ #include "H5Ipkg.h" #include "H5VLprivate.h" /* Virtual Object Layer */ -/* Evict on close is not supported under parallel at this time. - * In the meantime, we just run a simple check that EoC can't be - * enabled in parallel HDF5. - */ -#ifndef H5_HAVE_PARALLEL - /* Uncomment to manually inspect cache states */ /* (Requires debug build of the library) */ /* #define EOC_MANUAL_INSPECTION */ @@ -974,89 +968,3 @@ main(void) exit(EXIT_FAILURE); } /* end main() */ - -#else - -/*------------------------------------------------------------------------- - * Function: check_evict_on_close_parallel_fail() - * - * Purpose: Verify that the H5Pset_evict_on_close() call fails in - * parallel HDF5. 
- * - * Return: SUCCEED/FAIL - * - *------------------------------------------------------------------------- - */ -static herr_t -check_evict_on_close_parallel_fail(void) -{ - hid_t fapl_id = H5I_INVALID_HID; - bool evict_on_close; - herr_t status; - - TESTING("evict on close fails in parallel"); - - /* Create a fapl */ - if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) - TEST_ERROR; - - /* Set the evict on close property (should fail)*/ - evict_on_close = true; - H5E_BEGIN_TRY - { - status = H5Pset_evict_on_close(fapl_id, evict_on_close); - } - H5E_END_TRY - if (status >= 0) - FAIL_PUTS_ERROR("H5Pset_evict_on_close() did not fail in parallel HDF5."); - - /* close fapl */ - if (H5Pclose(fapl_id) < 0) - TEST_ERROR; - - PASSED(); - return SUCCEED; - -error: - H5_FAILED(); - return FAIL; - -} /* check_evict_on_close_parallel_fail() */ - -/*------------------------------------------------------------------------- - * Function: main (parallel version) - * - * Return: EXIT_FAILURE/EXIT_SUCCESS - * - *------------------------------------------------------------------------- - */ -int -main(void) -{ - unsigned nerrors = 0; /* number of test errors */ - - printf("Testing evict-on-close cache behavior\n"); - - /* Initialize */ - h5_reset(); - - /* Test that EoC fails in parallel HDF5 */ - nerrors += check_evict_on_close_parallel_fail() < 0 ? 1 : 0; - - if (nerrors) - goto error; - - printf("All evict-on-close tests passed.\n"); - printf("Note that EoC is not supported under parallel so most tests are skipped.\n"); - - exit(EXIT_SUCCESS); - -error: - - printf("***** %u evict-on-close test%s FAILED! *****\n", nerrors, nerrors > 1 ? "S" : ""); - - exit(EXIT_FAILURE); - -} /* main() - parallel */ - -#endif /* H5_HAVE_PARALLEL */ diff --git a/testpar/t_file.c b/testpar/t_file.c index a6a541becf3..700ccc2256f 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -1060,3 +1060,62 @@ test_invalid_libver_bounds_file_close_assert(void) ret = H5Pclose(fcpl_id); VRFY((SUCCEED == ret), "H5Pclose"); } + +/* + * Tests that H5Pevict_on_close properly succeeds in serial/one rank and fails when + * called by multiple ranks. 
+ */ +void +test_evict_on_close_parallel_unsupp(void) +{ + const char *filename = NULL; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + hid_t fid = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + herr_t ret; + + filename = (const char *)GetTestParameters(); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup file access plist */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + ret = H5Pset_libver_bounds(fapl_id, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18); + VRFY((SUCCEED == ret), "H5Pset_libver_bounds"); + + ret = H5Pset_evict_on_close(fapl_id, true); + VRFY((SUCCEED == ret), "H5Pset_evict_on_close"); + + /* test on 1 rank */ + ret = H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, info); + VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); + + if (mpi_rank == 0) { + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((SUCCEED == ret), "H5Fcreate"); + ret = H5Fclose(fid); + VRFY((SUCCEED == ret), "H5Fclose"); + } + + VRFY((MPI_SUCCESS == MPI_Barrier(MPI_COMM_WORLD)), "MPI_Barrier"); + + /* test on multiple ranks if we have them */ + if (mpi_size > 1) { + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); + + H5E_BEGIN_TRY + { + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + } + H5E_END_TRY + VRFY((fid == H5I_INVALID_HID), "H5Fcreate"); + } + + ret = H5Pclose(fapl_id); + VRFY((SUCCEED == ret), "H5Pclose"); +} diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 2d85e1ae289..2428c719a1d 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -366,6 +366,9 @@ main(int argc, char **argv) AddTest("invlibverassert", test_invalid_libver_bounds_file_close_assert, NULL, "Invalid libver bounds assertion failure", PARATESTFILE); + AddTest("evictparassert", test_evict_on_close_parallel_unsupp, NULL, "Evict on close in parallel failure", + PARATESTFILE); + AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 5699760c61b..6bbdb0d23d5 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -233,6 +233,7 @@ void zero_dim_dset(void); void test_file_properties(void); void test_delete(void); void test_invalid_libver_bounds_file_close_assert(void); +void test_evict_on_close_parallel_unsupp(void); void multiple_dset_write(void); void multiple_group_write(void); void multiple_group_read(void); From fbf77a8b1620cd23e707294e22693caaf79c60ab Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 24 Oct 2023 16:28:22 -0500 Subject: [PATCH 058/101] Add release note for H5Pset_evict_on_close change for parallel HDF5 (#3765) --- release_docs/RELEASE.txt | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index e5cb09707bd..291f72a8701 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -320,6 +320,18 @@ New Features performing I/O on all the filtered datasets at once and then performing I/O on all the unfiltered datasets at once. + - Changed H5Pset_evict_on_close so that it can be called with a parallel + build of HDF5 + + Previously, H5Pset_evict_on_close would always fail when called from a + parallel build of HDF5, stating that the feature is not supported with + parallel HDF5. 
This failure would occur even if a parallel build of HDF5 + was used with a serial HDF5 application. H5Pset_evict_on_close can now + be called regardless of the library build type and the library will + instead fail during H5Fcreate/H5Fopen if the "evict on close" property + has been set to true and the file is being opened for parallel access + with more than 1 MPI process. + Fortran Library: ---------------- From 6578c452b5d7b0db82d3cf4e58f8a2a11e909b52 Mon Sep 17 00:00:00 2001 From: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Date: Tue, 24 Oct 2023 21:48:28 -0500 Subject: [PATCH 059/101] Preserve MPI-I/O file hints when fapl is closed (#3755) * Fix for issue #3025: Save the MPI info in the file struct so H5Fget_access_plist() can retrieve it from there. --- src/H5Fint.c | 23 +++++++--- src/H5Fpkg.h | 1 + testpar/t_file.c | 102 ++++++++++++++++++++++++++++++++++++++++++++ testpar/testphdf5.c | 2 + testpar/testphdf5.h | 1 + 5 files changed, 124 insertions(+), 5 deletions(-) diff --git a/src/H5Fint.c b/src/H5Fint.c index 4093b4b7c45..439fa4f35a6 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -402,7 +402,6 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set collective metadata read flag"); if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { MPI_Comm mpi_comm; - MPI_Info mpi_info; /* Retrieve and set MPI communicator */ if (MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(f))) @@ -410,10 +409,8 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_COMM_NAME, &mpi_comm) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI communicator"); - /* Retrieve and set MPI info object */ - if (H5P_get(old_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &mpi_info) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't get MPI info object"); - if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &mpi_info) < 0) + /* Retrieve MPI info object */ + if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &(f->shared->mpi_info)) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI info object"); } #endif /* H5_HAVE_PARALLEL */ @@ -1133,6 +1130,12 @@ H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5F /* initialize point of no return */ f->shared->point_of_no_return = false; +#ifdef H5_HAVE_PARALLEL + /* Initialize this just in case we fail before setting this field and */ + /* we try to call H5_mpi_info_free() on uninitialized memory in H5F__dest() */ + f->shared->mpi_info = MPI_INFO_NULL; +#endif /* H5_HAVE_PARALLEL */ + /* Copy the file creation and file access property lists into the * new file handle. We do this early because some values might need * to change as the file is being opened. 
@@ -1209,6 +1212,8 @@ H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5F HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata read flag"); if (H5P_get(plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->shared->coll_md_write)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata write flag"); + if (H5P_get(plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &(f->shared->mpi_info)) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't set MPI info object"); #endif /* H5_HAVE_PARALLEL */ if (H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0) @@ -1414,6 +1419,14 @@ H5F__dest(H5F_t *f, bool flush, bool free_on_failure) f->shared->efc = NULL; } /* end if */ +#ifdef H5_HAVE_PARALLEL + if (f->shared->mpi_info != MPI_INFO_NULL) { + /* Free MPI info saved in the file struct */ + if (H5_mpi_info_free(&f->shared->mpi_info) < 0) + HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't free MPI info"); + } +#endif + /* With the shutdown modifications, the contents of the metadata cache * should be clean at this point, with the possible exception of the * the superblock and superblock extension. diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index bc5c90bd5da..e81b25072eb 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -359,6 +359,7 @@ struct H5F_shared_t { #ifdef H5_HAVE_PARALLEL H5P_coll_md_read_flag_t coll_md_read; /* Do all metadata reads collectively */ bool coll_md_write; /* Do all metadata writes collectively */ + MPI_Info mpi_info; /* MPI info */ #endif /* H5_HAVE_PARALLEL */ }; diff --git a/testpar/t_file.c b/testpar/t_file.c index 700ccc2256f..ce55270cdd2 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -1119,3 +1119,105 @@ test_evict_on_close_parallel_unsupp(void) ret = H5Pclose(fapl_id); VRFY((SUCCEED == ret), "H5Pclose"); } + +/* + * Verify that MPI I/O hints are preserved after closing the file access property list + * as described in issue #3025 + * This is a test program from the user. 
+ */ +void +test_fapl_preserve_hints(void) +{ + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + const char *filename; + + int nkeys_used; + bool same = false; + + MPI_Info info = MPI_INFO_NULL; + const char *key = "hdf_info_fapl"; + const char *value = "xyz"; + + MPI_Info info_used = MPI_INFO_NULL; + int flag = -1; + char value_used[20]; + char key_used[20]; + + int i; + herr_t ret; /* Generic return value */ + int mpi_ret; /* MPI return value */ + + filename = (const char *)GetTestParameters(); + + /* set up MPI parameters */ + mpi_ret = MPI_Info_create(&info); + VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); + + mpi_ret = MPI_Info_set(info, key, value); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set succeeded"); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + + ret = H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio"); + + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + fapl_id = H5Fget_access_plist(fid); + VRFY((fapl_id != H5I_INVALID_HID), "H5Fget_access_plist succeeded"); + + ret = H5Pget_fapl_mpio(fapl_id, NULL, &info_used); + VRFY((ret >= 0), "H5Pget_fapl_mpio succeeded"); + + VRFY((info_used != MPI_INFO_NULL), "H5Pget_fapl_mpio"); + + mpi_ret = MPI_Info_get_nkeys(info_used, &nkeys_used); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get_nkeys succeeded"); + + /* Loop over the # of keys */ + for (i = 0; i < nkeys_used; i++) { + + /* Memset the buffers to zero */ + memset(key_used, 0, 20); + memset(value_used, 0, 20); + + /* Get the nth key */ + mpi_ret = MPI_Info_get_nthkey(info_used, i, key_used); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get_nthkey succeeded"); + + if (!strcmp(key_used, key)) { + + mpi_ret = MPI_Info_get(info_used, key_used, 20, value_used, &flag); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get succeeded"); + + if (!strcmp(value_used, value)) { + + /* Both key_used and value_used are the same */ + same = true; + break; + } + } + } /* end for */ + + VRFY((same == true), "key_used and value_used are the same"); + + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Free the MPI info object */ + mpi_ret = MPI_Info_free(&info); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + + mpi_ret = MPI_Info_free(&info_used); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + +} /* end test_fapl_preserve_hints() */ diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 2428c719a1d..985c3de734c 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -368,6 +368,8 @@ main(int argc, char **argv) AddTest("evictparassert", test_evict_on_close_parallel_unsupp, NULL, "Evict on close in parallel failure", PARATESTFILE); + AddTest("fapl_preserve", test_fapl_preserve_hints, NULL, "preserve MPI I/O hints after fapl closed", + PARATESTFILE); AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 6bbdb0d23d5..45f1a945d07 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -234,6 +234,7 @@ void test_file_properties(void); void test_delete(void); void test_invalid_libver_bounds_file_close_assert(void); void 
test_evict_on_close_parallel_unsupp(void); +void test_fapl_preserve_hints(void); void multiple_dset_write(void); void multiple_group_write(void); void multiple_group_read(void); From 1d8513f034864e690d6006d5b6a324e2edc2d3ea Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Wed, 25 Oct 2023 13:49:30 -0500 Subject: [PATCH 060/101] Add compression tests for subfiling (#3769) --- testpar/t_subfiling_vfd.c | 142 +++++++++++++++++++++++++++++++++----- 1 file changed, 125 insertions(+), 17 deletions(-) diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index 7c565997b3d..72613a3bef1 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -40,6 +40,8 @@ #define PATH_MAX 4096 #endif +#define DEFAULT_DEFLATE_LEVEL 9 + #define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0]) #define CHECK_PASSED() \ @@ -82,12 +84,15 @@ static char *config_dir = NULL; int nerrors = 0; int curr_nerrors = 0; +bool enable_compression = false; + /* Function pointer typedef for test functions */ typedef void (*test_func)(void); /* Utility functions */ static hid_t create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, bool custom_config, H5FD_subfiling_params_t *custom_cfg, int32_t thread_pool_size); +static hid_t create_dcpl_id(int rank, const hsize_t dims[], hid_t dxpl_id); /* Test functions */ static void test_create_and_close(void); @@ -182,7 +187,47 @@ create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, bool custom_config, return H5I_INVALID_HID; } +/* --------------------------------------------------------------------------- + * Function: create_dcpl_id + * + * Purpose: Creates dataset creation property list identifier with + * chunking and compression, and enforces the + * required collective IO. + * + * Return: Success: HID Dataset creation property list identifier, + * a non-negative value. + * Failure: H5I_INVALID_HID, a negative value. 
+ * --------------------------------------------------------------------------- + */ +static hid_t +create_dcpl_id(int rank, const hsize_t dset_dims[], hid_t dxpl_id) +{ + hsize_t chunk_dims[1]; + hid_t ret_value = H5I_INVALID_HID; + + if ((ret_value = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (enable_compression) { + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) + TEST_ERROR; + chunk_dims[0] = dset_dims[0] / 2; + if (H5Pset_chunk(ret_value, rank, chunk_dims) < 0) + TEST_ERROR; + if (H5Pset_deflate(ret_value, DEFAULT_DEFLATE_LEVEL) < 0) + TEST_ERROR; + } + + return ret_value; +error: + if ((H5I_INVALID_HID != ret_value) && (H5Pclose(ret_value) < 0)) { + H5_FAILED(); + AT(); + } + + return H5I_INVALID_HID; +} /* * A simple test that creates and closes a file with the * subfiling VFD @@ -1060,6 +1105,7 @@ test_read_different_stripe_size(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; char *tmp_filename = NULL; void *buf = NULL; @@ -1106,7 +1152,10 @@ test_read_different_stripe_size(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1129,6 +1178,7 @@ test_read_different_stripe_size(void) VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); /* Ensure all the subfiles are present */ @@ -1153,10 +1203,12 @@ test_read_different_stripe_size(void) VRFY((fclose(subfile_ptr) >= 0), "fclose on subfile succeeded"); /* Check file size */ - VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); - subfile_size = (h5_stat_size_t)subfile_info.st_size; + if (!enable_compression) { + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; - VRFY((subfile_size >= cfg.stripe_size), "File size verification succeeded"); + VRFY((subfile_size >= cfg.stripe_size), "File size verification succeeded"); + } } } @@ -1376,10 +1428,12 @@ test_subfiling_precreate_rank_0(void) VRFY((fclose(subfile_ptr) >= 0), "fclose on subfile succeeded"); /* Check file size */ - VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); - file_size = (h5_stat_size_t)subfile_info.st_size; + if (!enable_compression) { + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + file_size = (h5_stat_size_t)subfile_info.st_size; - VRFY((file_size >= cfg.stripe_size), "File size verification succeeded"); + VRFY((file_size >= cfg.stripe_size), "File size verification succeeded"); + } } /* Verify that there aren't too many subfiles */ @@ -1470,6 +1524,7 @@ test_subfiling_write_many_read_one(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; @@ -1517,7 +1572,10 @@ test_subfiling_write_many_read_one(void) fspace_id = 
H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1539,6 +1597,7 @@ test_subfiling_write_many_read_one(void) buf = NULL; VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); mpi_code_g = MPI_Barrier(comm_g); @@ -1616,6 +1675,7 @@ test_subfiling_write_many_read_few(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; @@ -1673,7 +1733,10 @@ test_subfiling_write_many_read_few(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1695,6 +1758,7 @@ test_subfiling_write_many_read_few(void) buf = NULL; VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); /* @@ -1808,6 +1872,7 @@ test_subfiling_h5fuse(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; int skip_test = 0; @@ -1898,7 +1963,10 @@ test_subfiling_h5fuse(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1919,8 +1987,11 @@ test_subfiling_h5fuse(void) free(buf); buf = NULL; + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT) >= 0), "H5Pset_dxpl_mpio succeeded"); + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); if (MAINPROCESS) { @@ -1973,8 +2044,10 @@ test_subfiling_h5fuse(void) } /* Verify the size of the fused file */ - VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); - VRFY(((size_t)file_info.st_size >= target_size), "File size verification succeeded"); + if (!enable_compression) { + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + VRFY(((size_t)file_info.st_size >= target_size), "File size verification succeeded"); + } /* Re-open file with sec2 driver and verify the data */ 
file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); @@ -2414,9 +2487,28 @@ main(int argc, char **argv) if (num_iocs_g > mpi_size) num_iocs_g = mpi_size; - if (MAINPROCESS) { - printf("Re-running tests with environment variables set\n"); + if (MAINPROCESS) + printf(" Re-running tests with compression enabled\n"); + +#ifdef H5_HAVE_FILTER_DEFLATE + enable_compression = true; + for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } } + enable_compression = false; +#else + if (MAINPROCESS) + SKIPPED(); +#endif + if (MAINPROCESS) + printf("\nRe-running tests with environment variables set\n"); for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { @@ -2430,13 +2522,29 @@ main(int argc, char **argv) } if (MAINPROCESS) - puts(""); - + printf("\n Re-running tests with compression enabled\n"); +#ifdef H5_HAVE_FILTER_DEFLATE + enable_compression = true; + for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } + } + enable_compression = false; +#else + if (MAINPROCESS) + SKIPPED(); +#endif if (nerrors) goto exit; if (MAINPROCESS) - puts("All Subfiling VFD tests passed\n"); + puts("\nAll Subfiling VFD tests passed\n"); exit: if (must_unset_stripe_size_env) From cbd5a8d3ce28fa409ce27db167691278a4d60852 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 25 Oct 2023 16:37:19 -0500 Subject: [PATCH 061/101] Cache installation of OpenMPI for AOCC action (#3774) --- .github/workflows/linux-auto-aocc-ompi.yml | 25 ++++++++++++++-------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/.github/workflows/linux-auto-aocc-ompi.yml b/.github/workflows/linux-auto-aocc-ompi.yml index 76b6452226c..e4aa25a4e94 100644 --- a/.github/workflows/linux-auto-aocc-ompi.yml +++ b/.github/workflows/linux-auto-aocc-ompi.yml @@ -21,7 +21,7 @@ concurrency: permissions: contents: read - + jobs: build: runs-on: ubuntu-latest @@ -42,24 +42,31 @@ jobs: which clang which flang clang -v - - name: Install OpenMPI 4.1.5 + - name: Cache OpenMPI 4.1.5 installation + id: cache-openmpi-4_1_5 + uses: actions/cache@v3 + with: + path: /home/runner/work/hdf5/hdf5/openmpi-4.1.5-install + key: ${{ runner.os }}-${{ runner.arch }}-openmpi-4_1_5-cache + - if: ${{ steps.cache-openmpi-4_1_5.outputs.cache-hit != 'true' }} + name: Install OpenMPI 4.1.5 run: | export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.5.tar.gz tar zxvf openmpi-4.1.5.tar.gz cd openmpi-4.1.5 - ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/flang --prefix=/usr/local + ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/flang --prefix=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install make - sudo make install + make install - name: Install HDF5 env: - NPROCS: 2 + NPROCS: 2 run: | - export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib/openmpi:/usr/local/lib - export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib/openmpi:/usr/local/lib - export PATH=/usr/local/bin:$PATH + export 
LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib + export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH ./autogen.sh - ./configure --prefix=/tmp --enable-parallel --enable-shared CC=/usr/local/bin/mpicc LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib -L/usr/local/lib/openmpi" + ./configure --prefix=/tmp --enable-parallel --enable-shared CC=mpicc LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib -L/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib" make -j make check -j make install From ebeb642fcdc3c82dec38c046948559119cd4451b Mon Sep 17 00:00:00 2001 From: tbeu Date: Thu, 26 Oct 2023 05:08:41 +0200 Subject: [PATCH 062/101] Fix typo in comment (#3775) --- src/H5Tvlen.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c index 68087a8e134..651ff8f7267 100644 --- a/src/H5Tvlen.c +++ b/src/H5Tvlen.c @@ -503,7 +503,7 @@ H5T__vlen_mem_seq_write(H5VL_object_t H5_ATTR_UNUSED *file, const H5T_vlen_alloc if (seq_len) { size_t len = seq_len * base_size; /* Sequence size */ - /* Use the user's memory allocation routine is one is defined */ + /* Use the user's memory allocation routine if one is defined */ if (vl_alloc_info->alloc_func != NULL) { if (NULL == (vl.p = (vl_alloc_info->alloc_func)(len, vl_alloc_info->alloc_info))) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, From 6ccace572e99561adf820af686da19f9a31efee3 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 25 Oct 2023 21:16:32 -0700 Subject: [PATCH 063/101] Fixed a file handle leak in the core VFD (#3779) When opening a file with the core VFD and a file image, if the file already exists, the file check would leak the POSIX file handle. Fixes GitHub issue #635 --- release_docs/RELEASE.txt | 7 +++++++ src/H5FDcore.c | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 291f72a8701..7266de3eea9 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -404,6 +404,13 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed a file handle leak in the core VFD + + When opening a file with the core VFD and a file image, if the file + already exists, the file check would leak the POSIX file handle. 
+ + Fixes GitHub issue #635 + - Fixed some issues with chunk index metadata not getting read collectively when collective metadata reads are enabled diff --git a/src/H5FDcore.c b/src/H5FDcore.c index 08b714dfa34..1aa8d4bf19c 100644 --- a/src/H5FDcore.c +++ b/src/H5FDcore.c @@ -754,8 +754,10 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr ((file_image_info.buffer == NULL) && (file_image_info.size == 0))); memset(&sb, 0, sizeof(sb)); if ((file_image_info.buffer != NULL) && !(H5F_ACC_CREAT & flags)) { - if (HDopen(name, o_flags, H5_POSIX_CREATE_MODE_RW) >= 0) + if ((fd = HDopen(name, o_flags, H5_POSIX_CREATE_MODE_RW)) >= 0) { + HDclose(fd); HGOTO_ERROR(H5E_FILE, H5E_FILEEXISTS, NULL, "file already exists"); + } /* If backing store is requested, create and stat the file * Note: We are forcing the O_CREAT flag here, even though this is From 74cc1cf59eae121c1d15caf4fb0420a18a45da13 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Wed, 25 Oct 2023 21:31:21 -0700 Subject: [PATCH 064/101] Fix a format string warning in the C++ examples (#3776) --- hl/c++/examples/ptExampleFL.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hl/c++/examples/ptExampleFL.cpp b/hl/c++/examples/ptExampleFL.cpp index 0e280f0355b..cb407e21f65 100644 --- a/hl/c++/examples/ptExampleFL.cpp +++ b/hl/c++/examples/ptExampleFL.cpp @@ -72,7 +72,7 @@ main(void) if (err < 0) fprintf(stderr, "Error getting packet count."); - printf("Number of packets in packet table after five appends: %llu\n", count); + printf("Number of packets in packet table after five appends: %" PRIuHSIZE "\n", count); /* Initialize packet table's "current record" */ ptable.ResetIndex(); From 119e694f971470b26876d227ceedbaab36eef275 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 26 Oct 2023 10:49:50 -0500 Subject: [PATCH 065/101] Cancel running GitHub workflows on push to same PR (#3772) * Cancel running GitHub workflows on push to same PR * Remove github.sha from workflow concurrency groups --- .github/workflows/cve.yml | 2 +- .github/workflows/hdfeos5.yml | 2 +- .github/workflows/linux-auto-aocc-ompi.yml | 2 +- .github/workflows/main.yml | 2 +- .github/workflows/netcdf.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cve.yml b/.github/workflows/cve.yml index 6756840981d..372518ade12 100644 --- a/.github/workflows/cve.yml +++ b/.github/workflows/cve.yml @@ -16,7 +16,7 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true permissions: diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml index 5faf74a9ac4..dad262d426f 100644 --- a/.github/workflows/hdfeos5.yml +++ b/.github/workflows/hdfeos5.yml @@ -16,7 +16,7 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true permissions: diff --git a/.github/workflows/linux-auto-aocc-ompi.yml b/.github/workflows/linux-auto-aocc-ompi.yml index e4aa25a4e94..435d93f95c7 100644 --- a/.github/workflows/linux-auto-aocc-ompi.yml +++ b/.github/workflows/linux-auto-aocc-ompi.yml @@ -16,7 +16,7 @@ on: # Using concurrency to cancel 
any in-progress job or run concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true permissions: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index e472b8cff56..43513c51a26 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -17,7 +17,7 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true permissions: diff --git a/.github/workflows/netcdf.yml b/.github/workflows/netcdf.yml index f38608012c0..f08361f6ea0 100644 --- a/.github/workflows/netcdf.yml +++ b/.github/workflows/netcdf.yml @@ -19,7 +19,7 @@ permissions: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true jobs: From 1bcef50b864fb1ea6e00900b3982e71d183cfda5 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Fri, 27 Oct 2023 23:18:24 -0500 Subject: [PATCH 066/101] Print some messages in parallel tests on MPI rank 0 only (#3785) Avoids overly verbose output from all processes emitting progress, etc. info. --- test/h5test.c | 28 +++++++++------ test/testframe.c | 84 ++++++++++++++++++++++++------------------- testpar/t_2Gio.c | 6 ++-- testpar/t_mpi.c | 46 +++++++++++------------- testpar/t_shapesame.c | 17 +++++---- 5 files changed, 99 insertions(+), 82 deletions(-) diff --git a/test/h5test.c b/test/h5test.c index 5348e1b4c8d..304637580f6 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -864,22 +864,23 @@ h5_show_hostname(void) WSADATA wsaData; int err; #endif +#ifdef H5_HAVE_PARALLEL + int mpi_rank, mpi_initialized, mpi_finalized; +#endif /* try show the process or thread id in multiple processes cases*/ #ifdef H5_HAVE_PARALLEL - { - int mpi_rank, mpi_initialized, mpi_finalized; - - MPI_Initialized(&mpi_initialized); - MPI_Finalized(&mpi_finalized); + MPI_Initialized(&mpi_initialized); + MPI_Finalized(&mpi_finalized); - if (mpi_initialized && !mpi_finalized) { - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - printf("MPI-process %d.", mpi_rank); - } - else - printf("thread 0."); + if (mpi_initialized && !mpi_finalized) { + /* Prevent output here from getting mixed with later output */ + MPI_Barrier(MPI_COMM_WORLD); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + printf("MPI-process %d.", mpi_rank); } + else + printf("thread 0."); #else printf("thread %" PRIu64 ".", H5TS_thread_id()); #endif @@ -915,6 +916,11 @@ h5_show_hostname(void) #ifdef H5_HAVE_WIN32_API WSACleanup(); #endif +#ifdef H5_HAVE_PARALLEL + /* Prevent output here from getting mixed with later output */ + if (mpi_initialized && !mpi_finalized) + MPI_Barrier(MPI_COMM_WORLD); +#endif } #ifdef H5_HAVE_PARALLEL diff --git a/test/testframe.c b/test/testframe.c index 2b650270f3a..5cb25ed8148 100644 --- a/test/testframe.c +++ b/test/testframe.c @@ -155,35 +155,37 @@ TestUsage(void) { unsigned i; - print_func("Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n", TestProgName, - (TestPrivateUsage ? 
"" : "")); - print_func(" [-[e]x[clude] name]+ \n"); - print_func(" [-o[nly] name]+ \n"); - print_func(" [-b[egin] name] \n"); - print_func(" [-s[ummary]] \n"); - print_func(" [-c[leanoff]] \n"); - print_func(" [-h[elp]] \n"); - print_func("\n\n"); - print_func("verbose controls the amount of information displayed\n"); - print_func("exclude to exclude tests by name\n"); - print_func("only to name tests which should be run\n"); - print_func("begin start at the name of the test given\n"); - print_func("summary prints a summary of test results at the end\n"); - print_func("cleanoff does not delete *.hdf files after execution of tests\n"); - print_func("help print out this information\n"); - if (TestPrivateUsage) { - print_func("\nExtra options\n"); - TestPrivateUsage(); - } - print_func("\n\n"); - print_func("This program currently tests the following: \n\n"); - print_func("%16s %s\n", "Name", "Description"); - print_func("%16s %s\n", "----", "-----------"); + if (mpi_rank_framework_g == 0) { + print_func("Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n", TestProgName, + (TestPrivateUsage ? "" : "")); + print_func(" [-[e]x[clude] name]+ \n"); + print_func(" [-o[nly] name]+ \n"); + print_func(" [-b[egin] name] \n"); + print_func(" [-s[ummary]] \n"); + print_func(" [-c[leanoff]] \n"); + print_func(" [-h[elp]] \n"); + print_func("\n\n"); + print_func("verbose controls the amount of information displayed\n"); + print_func("exclude to exclude tests by name\n"); + print_func("only to name tests which should be run\n"); + print_func("begin start at the name of the test given\n"); + print_func("summary prints a summary of test results at the end\n"); + print_func("cleanoff does not delete *.hdf files after execution of tests\n"); + print_func("help print out this information\n"); + if (TestPrivateUsage) { + print_func("\nExtra options\n"); + TestPrivateUsage(); + } + print_func("\n\n"); + print_func("This program currently tests the following: \n\n"); + print_func("%16s %s\n", "Name", "Description"); + print_func("%16s %s\n", "----", "-----------"); - for (i = 0; i < Index; i++) - print_func("%16s %s\n", Test[i].Name, Test[i].Description); + for (i = 0; i < Index; i++) + print_func("%16s %s\n", Test[i].Name, Test[i].Description); - print_func("\n\n"); + print_func("\n\n"); + } } /* @@ -192,12 +194,14 @@ TestUsage(void) void TestInfo(const char *ProgName) { - unsigned major, minor, release; + if (mpi_rank_framework_g == 0) { + unsigned major, minor, release; - H5get_libversion(&major, &minor, &release); + H5get_libversion(&major, &minor, &release); - print_func("\nFor help use: %s -help\n", ProgName); - print_func("Linked with hdf5 version %u.%u release %u\n", major, minor, release); + print_func("\nFor help use: %s -help\n", ProgName); + print_func("Linked with hdf5 version %u.%u release %u\n", major, minor, release); + } } /* @@ -301,20 +305,24 @@ PerformTests(void) for (Loop = 0; Loop < Index; Loop++) if (Test[Loop].SkipFlag) { - MESSAGE(2, ("Skipping -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); + if (mpi_rank_framework_g == 0) + MESSAGE(2, ("Skipping -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); } else { if (mpi_rank_framework_g == 0) MESSAGE(2, ("Testing -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); - MESSAGE(5, ("===============================================\n")); + if (mpi_rank_framework_g == 0) + MESSAGE(5, ("===============================================\n")); Test[Loop].NumErrors = num_errs; Test_parameters = Test[Loop].Parameters; 
TestAlarmOn(); Test[Loop].Call(); TestAlarmOff(); Test[Loop].NumErrors = num_errs - Test[Loop].NumErrors; - MESSAGE(5, ("===============================================\n")); - MESSAGE(5, ("There were %d errors detected.\n\n", (int)Test[Loop].NumErrors)); + if (mpi_rank_framework_g == 0) { + MESSAGE(5, ("===============================================\n")); + MESSAGE(5, ("There were %d errors detected.\n\n", (int)Test[Loop].NumErrors)); + } } Test_parameters = NULL; /* clear it. */ @@ -358,7 +366,8 @@ TestCleanup(void) { unsigned Loop; - MESSAGE(2, ("\nCleaning Up temp files...\n\n")); + if (mpi_rank_framework_g == 0) + MESSAGE(2, ("\nCleaning Up temp files...\n\n")); /* call individual cleanup routines in each source module */ for (Loop = 0; Loop < Index; Loop++) @@ -619,7 +628,8 @@ SetTest(const char *testname, int action) break; default: /* error */ - printf("*** ERROR: Unknown action (%d) for SetTest\n", action); + if (mpi_rank_framework_g == 0) + printf("*** ERROR: Unknown action (%d) for SetTest\n", action); break; } } diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c index c2aac771b29..48abf8ed9dd 100644 --- a/testpar/t_2Gio.c +++ b/testpar/t_2Gio.c @@ -4291,9 +4291,10 @@ main(int argc, char **argv) printf("2 GByte IO TESTS START\n"); printf("2 MPI ranks will run the tests...\n"); printf("===================================\n"); - h5_show_hostname(); } + h5_show_hostname(); + if (H5dont_atexit() < 0) { printf("Failed to turn off atexit processing. Continue.\n"); }; @@ -4345,8 +4346,7 @@ main(int argc, char **argv) #endif /* H5_HAVE_FILTER_DEFLATE */ /* Display testing information */ - if (MAINPROCESS) - TestInfo(argv[0]); + TestInfo(argv[0]); /* setup file access property list */ fapl = H5Pcreate(H5P_FILE_ACCESS); diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index eff39d057e0..0f1e27b506e 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -53,14 +53,14 @@ test_mpio_overlap_writes(char *filename) MPI_Offset mpi_off; MPI_Status mpi_stat; - if (VERBOSE_MED) - printf("MPIO independent overlapping writes test on file %s\n", filename); - nerrs = 0; /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + if (VERBOSE_MED && MAINPROCESS) + printf("MPIO independent overlapping writes test on file %s\n", filename); + /* Need at least 2 processes */ if (mpi_size < 2) { if (MAINPROCESS) @@ -211,7 +211,7 @@ test_mpio_gb_file(char *filename) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if (VERBOSE_MED) + if (VERBOSE_MED && MAINPROCESS) printf("MPI_Offset range test\n"); /* figure out the signness and sizeof MPI_Offset */ @@ -274,12 +274,13 @@ test_mpio_gb_file(char *filename) /* * Verify if we can write to a file of multiple GB sizes. */ - if (VERBOSE_MED) + if (VERBOSE_MED && MAINPROCESS) printf("MPIO GB file test %s\n", filename); if (sizeof_mpi_offset <= 4) { - printf("Skipped GB file range test " - "because MPI_Offset cannot support it\n"); + if (MAINPROCESS) + printf("Skipped GB file range test " + "because MPI_Offset cannot support it\n"); } else { buf = (char *)malloc(MB); @@ -294,7 +295,8 @@ test_mpio_gb_file(char *filename) mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh); VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN"); - printf("MPIO GB file write test %s\n", filename); + if (MAINPROCESS) + printf("MPIO GB file write test %s\n", filename); /* instead of writing every bytes of the file, we will just write * some data around the 2 and 4 GB boundaries. 
That should cover @@ -333,7 +335,8 @@ test_mpio_gb_file(char *filename) */ /* open it again to verify the data written */ /* but only if there was no write errors */ - printf("MPIO GB file read test %s\n", filename); + if (MAINPROCESS) + printf("MPIO GB file read test %s\n", filename); if (errors_sum(writerrs) > 0) { printf("proc %d: Skip read test due to previous write errors\n", mpi_rank); goto finish; @@ -377,7 +380,8 @@ test_mpio_gb_file(char *filename) mrc = MPI_Barrier(MPI_COMM_WORLD); VRFY((mrc == MPI_SUCCESS), "Sync before leaving test"); - printf("Test if MPI_File_get_size works correctly with %s\n", filename); + if (MAINPROCESS) + printf("Test if MPI_File_get_size works correctly with %s\n", filename); mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); VRFY((mrc == MPI_SUCCESS), ""); @@ -432,7 +436,6 @@ test_mpio_gb_file(char *filename) static int test_mpio_1wMr(char *filename, int special_request) { - char hostname[128]; int mpi_size, mpi_rank; MPI_File fh; char mpi_err_str[MPI_MAX_ERROR_STRING]; @@ -456,19 +459,8 @@ test_mpio_1wMr(char *filename, int special_request) } /* show the hostname so that we can tell where the processes are running */ - if (VERBOSE_DEF) { -#ifdef H5_HAVE_GETHOSTNAME - if (gethostname(hostname, sizeof(hostname)) < 0) { - printf("gethostname failed\n"); - hostname[0] = '\0'; - } -#else - printf("gethostname unavailable\n"); - hostname[0] = '\0'; -#endif - PRINTID; - printf("hostname=%s\n", hostname); - } + if (VERBOSE_DEF) + h5_show_hostname(); /* Delete any old file in order to start anew. */ /* Must delete because MPI_File_open does not have a Truncate mode. */ @@ -1005,6 +997,10 @@ test_mpio_special_collective(char *filename) static int parse_options(int argc, char **argv) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + while (--argc) { if (**(++argv) != '-') { break; @@ -1053,7 +1049,7 @@ parse_options(int argc, char **argv) return (1); } H5Pclose(plist); - if (VERBOSE_MED) { + if (VERBOSE_MED && MAINPROCESS) { printf("Test filenames are:\n"); for (i = 0; i < n; i++) printf(" %s\n", filenames[i]); diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index 98e307772a9..0a3d3d0a49e 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -4089,7 +4089,8 @@ parse_options(int argc, char **argv) case 'h': /* print help message--return with nerrors set */ return (1); default: - printf("Illegal option(%s)\n", *argv); + if (MAINPROCESS) + printf("Illegal option(%s)\n", *argv); nerrors++; return (1); } @@ -4098,12 +4099,14 @@ parse_options(int argc, char **argv) /* check validity of dimension and chunk sizes */ if (dim0 <= 0 || dim1 <= 0) { - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + if (MAINPROCESS) + printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); nerrors++; return (1); } if (chunkdim0 <= 0 || chunkdim1 <= 0) { - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + if (MAINPROCESS) + printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); nerrors++; return (1); } @@ -4128,9 +4131,11 @@ parse_options(int argc, char **argv) nerrors++; return (1); } - printf("Test filenames are:\n"); - for (i = 0; i < n; i++) - printf(" %s\n", filenames[i]); + if (MAINPROCESS) { + printf("Test filenames are:\n"); + for (i = 0; i < n; i++) + printf(" %s\n", filenames[i]); + } } return (0); From edb5cffdb20ad959c80b665cb8c2589f4c71414d Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Fri, 27 Oct 2023 23:20:29 -0500 Subject: [PATCH 067/101] Avoid attempted use of NULL pointer in 
parallel compression code (#3786) The parallel compression test code tests for the case where all MPI ranks have no selection in a dataset when writing to it. Add an early exit to the code to avoid attempting to use a NULL pointer due to there being no work to do. --- src/H5Dmpio.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 16243fadf92..b40ab4b2012 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -3782,6 +3782,10 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun counts_disps_array = H5MM_xfree(counts_disps_array); } + /* No useful work to do - exit */ + if (coll_chunk_list_num_entries == 0) + HGOTO_DONE(SUCCEED); + /* * Phase 2 - Involved ranks now redistribute any shared chunks to new * owners as necessary. From 556add35b78673a8a402841a7ef7e03fdb53daba Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 30 Oct 2023 11:12:10 -0500 Subject: [PATCH 068/101] Don't install h5tools_test_utils test program on system (#3793) --- release_docs/RELEASE.txt | 10 ++++++++++ tools/libtest/Makefile.am | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 7266de3eea9..0f18f6fa276 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -815,6 +815,16 @@ Bug Fixes since HDF5-1.14.0 release Configuration ------------- + - Fixed an issue where the h5tools_test_utils test program was being + installed on the system for Autotools builds of HDF5 + + The h5tools_test_utils test program was mistakenly added to bin_PROGRAMS + in its Makefile.am configuration file, causing the executable to be + installed on the system. The executable is now added to noinst_PROGRAMS + instead and will no longer be installed on the system for Autotools builds + of HDF5. The CMake configuration code already avoids installing the + executable on the system. 
+ - Fixed a configuration issue that prevented building of the Subfiling VFD on macOS Checks were added to the CMake and Autotools code to verify that CLOCK_MONOTONIC_COARSE, diff --git a/tools/libtest/Makefile.am b/tools/libtest/Makefile.am index 45b3f476df7..8a503d033b2 100644 --- a/tools/libtest/Makefile.am +++ b/tools/libtest/Makefile.am @@ -27,7 +27,7 @@ LDADD=$(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5) # main target -bin_PROGRAMS=h5tools_test_utils +noinst_PROGRAMS=h5tools_test_utils # check_PROGRAMS=$(TEST_PROG) From 87636d685594e804c0bfb59933e3a56b2dabcd28 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 30 Oct 2023 09:13:01 -0700 Subject: [PATCH 069/101] Add Doxygen to H5FDsplitter.h (#3794) * H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION * H5FD_SPLITTER_PATH_MAX * H5FD_SPLITTER_MAGIC * H5FD_splitter_vfd_config_t * H5Pset_fapl_splitter() * H5Pget_fapl_splitter() --- src/H5FDsplitter.h | 109 ++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 55 deletions(-) diff --git a/src/H5FDsplitter.h b/src/H5FDsplitter.h index c8751c82349..8e790e212b6 100644 --- a/src/H5FDsplitter.h +++ b/src/H5FDsplitter.h @@ -20,70 +20,37 @@ #define H5FD_SPLITTER (H5FDperform_init(H5FD_splitter_init)) #define H5FD_SPLITTER_VALUE H5_VFD_SPLITTER -/* The version of the H5FD_splitter_vfd_config_t structure used */ +/** The version of the H5FD_splitter_vfd_config_t structure used */ #define H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION 1 -/* Maximum length of a filename/path string in the Write-Only channel, +/** + * Maximum length of a filename/path string in the Write-Only channel, * including the NULL-terminator. */ #define H5FD_SPLITTER_PATH_MAX 4096 -/* Semi-unique constant used to help identify structure pointers */ +/** Semi-unique constant used to help identify structure pointers */ #define H5FD_SPLITTER_MAGIC 0x2B916880 -/* ---------------------------------------------------------------------------- - * Structure: H5FD_spliiter_vfd_config_t - * - * One-stop shopping for configuring a Splitter VFD (rather than many - * parameters passed into H5Pset/get functions). - * - * magic (int32_t) - * Semi-unique number, used to sanity-check that a given pointer is - * likely (or not) to be this structure type. MUST be first. - * If magic is not H5FD_SPLITTER_MAGIC, the structure (and/or pointer to) - * must be considered invalid. - * - * version (unsigned int) - * Version number of this structure -- informs component membership. - * If not H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION, the structure (and/or - * pointer to) must be considered invalid. - * - * rw_fapl_id (hid_t) - * Library-given identification number of the Read/Write channel driver - * File Access Property List. - * The driver must support read/write access. - * Must be set to H5P_DEFAULT or a valid FAPL ID. - * - * wo_fapl_id (hid_t) - * Library-given identification number of the Read/Write channel driver - * File Access Property List. - * The driver feature flags must include H5FD_FEAT_DEFAULT_VFD_COMPAITBLE. - * Must be set to H5P_DEFAULT or a valid FAPL ID. - * - * wo_file_path (char[H5FD_SPLITTER_PATH_MAX + 1]) - * String buffer for the Write-Only channel target file. - * Must be null-terminated, cannot be empty. - * - * log_file_path (char[H5FD_SPLITTER_PATH_MAX + 1]) - * String buffer for the Splitter VFD logging output. - * Must be null-terminated. - * If null, no logfile is created. 
- * - * ignore_wo_errors (hbool_t) - * Toggle flag for how judiciously to respond to errors on the Write-Only - * channel. - * - * ---------------------------------------------------------------------------- +//! +/** + * Configuration options for setting up the Splitter VFD */ typedef struct H5FD_splitter_vfd_config_t { - int32_t magic; - unsigned int version; - hid_t rw_fapl_id; - hid_t wo_fapl_id; - char wo_path[H5FD_SPLITTER_PATH_MAX + 1]; - char log_file_path[H5FD_SPLITTER_PATH_MAX + 1]; - hbool_t ignore_wo_errs; + int32_t magic; /**< Magic number to identify this struct. Must be \p H5FD_SPLITTER_MAGIC. */ + unsigned int version; /**< Version number of this struct. Currently must be \p + H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION. */ + hid_t rw_fapl_id; /**< File-access property list for setting up the read/write channel. Can be \p + H5P_DEFAULT. */ + hid_t wo_fapl_id; /**< File-access property list for setting up the read-only channel. The selected VFD + must support the \p H5FD_FEAT_DEFAULT_VFD_COMPATIBLE flag. Can be \p H5P_DEFAULT. */ + char wo_path[H5FD_SPLITTER_PATH_MAX + 1]; /**< Path to the write-only file */ + char log_file_path[H5FD_SPLITTER_PATH_MAX + 1]; /**< Path to the log file, which will be created on HDF5 + file open (existing files will be clobbered). Can be + NULL, in which case no logging output is generated. */ + hbool_t ignore_wo_errs; /**< Whether to ignore errors on the write-only channel */ } H5FD_splitter_vfd_config_t; +//! #ifdef __cplusplus extern "C" { @@ -93,14 +60,46 @@ H5_DLL hid_t H5FD_splitter_init(void); /** * \ingroup FAPL * - * \todo Add missing documentation + * \brief Sets the file access property list to use the splitter driver + * + * \fapl_id + * \param[in] config_ptr Configuration options for the VFD + * \returns \herr_t + * + * \details H5Pset_fapl_splitter() sets the file access property list identifier, + * \p fapl_id, to use the splitter driver. + * + * The splitter VFD echoes file manipulation (e.g. create, truncate) + * and write calls to a second, write-only file. + * + * \note The splitter VFD should not be confused with the split VFD, + * which is a simplification of the multi VFD and creates separate + * files for metadata and data. + * + * \since 1.10.7, 1.12.1 */ H5_DLL herr_t H5Pset_fapl_splitter(hid_t fapl_id, H5FD_splitter_vfd_config_t *config_ptr); /** * \ingroup FAPL * - * \todo Add missing documentation + * \brief Gets splitter driver properties from the the file access property list + * + * \fapl_id + * \param[out] config_ptr Configuration options for the VFD + * \returns \herr_t + * + * \details H5Pset_fapl_splitter() sets the file access property list identifier, + * \p fapl_id, to use the splitter driver. + * + * The splitter VFD echoes file manipulation (e.g. create, truncate) + * and write calls to a second file. + * + * \note The splitter VFD should not be confused with the split VFD, + * which is a simplification of the multi VFD and creates separate + * files for metadata and data. 
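A minimal usage sketch, assuming only the structure fields and macros documented above; the output paths are placeholders and both channel FAPLs are left at H5P_DEFAULT, so this is an illustration rather than a definitive setup:

#include "hdf5.h"
#include <string.h>

/* Minimal sketch; the output paths below are placeholders, not library defaults. */
static hid_t
make_splitter_fapl(void)
{
    H5FD_splitter_vfd_config_t config;
    hid_t                      fapl_id;

    if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return H5I_INVALID_HID;

    memset(&config, 0, sizeof(config));
    config.magic          = H5FD_SPLITTER_MAGIC;
    config.version        = H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION;
    config.rw_fapl_id     = H5P_DEFAULT; /* read/write channel driver */
    config.wo_fapl_id     = H5P_DEFAULT; /* write-only channel driver */
    config.ignore_wo_errs = false;
    strncpy(config.wo_path, "mirror.h5", sizeof(config.wo_path) - 1);
    strncpy(config.log_file_path, "splitter.log", sizeof(config.log_file_path) - 1);

    if (H5Pset_fapl_splitter(fapl_id, &config) < 0) {
        H5Pclose(fapl_id);
        return H5I_INVALID_HID;
    }

    return fapl_id;
}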
+ * + * \since 1.10.7, 1.12.1 */ H5_DLL herr_t H5Pget_fapl_splitter(hid_t fapl_id, H5FD_splitter_vfd_config_t *config_ptr); From 58ef7dcdc9bfbfc65fe69d782914671f66322d85 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 30 Oct 2023 09:13:54 -0700 Subject: [PATCH 070/101] Update Doxygen initializers & identifiers in VFDs (#3795) * Add Doxygen for all H5FD_ initializers * Add Doxygen for all H5FD__VALUE values * Mark H5FD__init() calls private in Doxygen --- src/H5FDcore.h | 12 ++++++++++-- src/H5FDdirect.h | 33 +++++++++++++++++++++++++-------- src/H5FDfamily.h | 11 +++++++++-- src/H5FDhdfs.h | 21 ++++++++++++++++++--- src/H5FDlog.h | 11 +++++++++-- src/H5FDmirror.h | 11 +++++++++-- src/H5FDmpio.h | 25 ++++++++++++++++--------- src/H5FDmulti.h | 8 +++++++- src/H5FDonion.h | 17 ++++++++++------- src/H5FDros3.h | 20 ++++++++++++-------- src/H5FDsec2.h | 13 ++++++++++--- src/H5FDsplitter.h | 12 ++++++++++-- src/H5FDstdio.h | 14 ++++++++++++-- src/H5FDwindows.h | 7 ++++++- 14 files changed, 163 insertions(+), 52 deletions(-) diff --git a/src/H5FDcore.h b/src/H5FDcore.h index 235d6fcaaf7..cd45c8d6061 100644 --- a/src/H5FDcore.h +++ b/src/H5FDcore.h @@ -11,17 +11,25 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the core driver. + * Purpose: The public header file for the core virtual file driver (VFD) */ #ifndef H5FDcore_H #define H5FDcore_H -#define H5FD_CORE (H5FDperform_init(H5FD_core_init)) +/** Initializer for the core VFD */ +#define H5FD_CORE (H5FDperform_init(H5FD_core_init)) + +/** Identifier for the core VFD */ #define H5FD_CORE_VALUE H5_VFD_CORE #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the core VFD + */ H5_DLL hid_t H5FD_core_init(void); /** diff --git a/src/H5FDdirect.h b/src/H5FDdirect.h index e47ac37cdea..1e60bb08119 100644 --- a/src/H5FDdirect.h +++ b/src/H5FDdirect.h @@ -11,30 +11,47 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the direct driver. + * Purpose: The public header file for the direct virtual file driver (VFD) */ #ifndef H5FDdirect_H #define H5FDdirect_H #ifdef H5_HAVE_DIRECT -#define H5FD_DIRECT (H5FDperform_init(H5FD_direct_init)) + +/** Initializer for the direct VFD */ +#define H5FD_DIRECT (H5FDperform_init(H5FD_direct_init)) + +/** Identifier for the direct VFD */ #define H5FD_DIRECT_VALUE H5_VFD_DIRECT + #else + +/** Initializer for the direct VFD (disabled) */ #define H5FD_DIRECT (H5I_INVALID_HID) + +/** Identifier for the direct VFD (disabled) */ #define H5FD_DIRECT_VALUE H5_VFD_INVALID + #endif /* H5_HAVE_DIRECT */ +/** Default value for memory boundary */ +#define MBOUNDARY_DEF 4096 + +/** Default value for file block size */ +#define FBSIZE_DEF 4096 + +/** Default value for maximum copy buffer size */ +#define CBSIZE_DEF (16 * 1024 * 1024) + #ifdef H5_HAVE_DIRECT #ifdef __cplusplus extern "C" { #endif -/* Default values for memory boundary, file block size, and maximal copy buffer size. - * Application can set these values through the function H5Pset_fapl_direct. 
*/ -#define MBOUNDARY_DEF 4096 -#define FBSIZE_DEF 4096 -#define CBSIZE_DEF 16 * 1024 * 1024 - +/** @private + * + * \brief Private initializer for the direct VFD + */ H5_DLL hid_t H5FD_direct_init(void); /** diff --git a/src/H5FDfamily.h b/src/H5FDfamily.h index 76020f0a268..32e885c422d 100644 --- a/src/H5FDfamily.h +++ b/src/H5FDfamily.h @@ -11,18 +11,25 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the family driver. + * Purpose: The public header file for the family virtual file driver (VFD) */ #ifndef H5FDfamily_H #define H5FDfamily_H -#define H5FD_FAMILY (H5FDperform_init(H5FD_family_init)) +/** Initializer for the family VFD */ +#define H5FD_FAMILY (H5FDperform_init(H5FD_family_init)) + +/** Identifier for the family VFD */ #define H5FD_FAMILY_VALUE H5_VFD_FAMILY #ifdef __cplusplus extern "C" { #endif +/** @private + * + * \brief Private initializer for the family VFD + */ H5_DLL hid_t H5FD_family_init(void); /** diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h index c8c2c37f1b5..e5f7173fce2 100644 --- a/src/H5FDhdfs.h +++ b/src/H5FDhdfs.h @@ -11,18 +11,29 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the hdfs driver. + * Purpose: The public header file for the Hadoop Distributed File System + * (hdfs) virtual file driver (VFD) */ #ifndef H5FDhdfs_H #define H5FDhdfs_H #ifdef H5_HAVE_LIBHDFS -#define H5FD_HDFS (H5FDperform_init(H5FD_hdfs_init)) + +/** Initializer for the hdfs VFD */ +#define H5FD_HDFS (H5FDperform_init(H5FD_hdfs_init)) + +/** Identifier for the hdfs VFD */ #define H5FD_HDFS_VALUE H5_VFD_HDFS -#else /* H5_HAVE_LIBHDFS */ + +#else + +/** Initializer for the hdfs VFD (disabled) */ #define H5FD_HDFS (H5I_INVALID_HID) + +/** Identifier for the hdfs VFD (disabled) */ #define H5FD_HDFS_VALUE H5_VFD_INVALID + #endif /* H5_HAVE_LIBHDFS */ #ifdef H5_HAVE_LIBHDFS @@ -104,6 +115,10 @@ typedef struct H5FD_hdfs_fapl_t { int32_t stream_buffer_size; } H5FD_hdfs_fapl_t; +/** @private + * + * \brief Private initializer for the hdfs VFD + */ H5_DLL hid_t H5FD_hdfs_init(void); /** diff --git a/src/H5FDlog.h b/src/H5FDlog.h index ae4e2d05e72..b4af2050a62 100644 --- a/src/H5FDlog.h +++ b/src/H5FDlog.h @@ -11,12 +11,15 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the log driver. + * Purpose: The public header file for the log virtual file driver (VFD) */ #ifndef H5FDlog_H #define H5FDlog_H -#define H5FD_LOG (H5FDperform_init(H5FD_log_init)) +/** Initializer for the log VFD */ +#define H5FD_LOG (H5FDperform_init(H5FD_log_init)) + +/** Identifier for the log VFD */ #define H5FD_LOG_VALUE H5_VFD_LOG /* Flags for H5Pset_fapl_log() */ @@ -62,6 +65,10 @@ extern "C" { #endif +/** @private + * + * \brief Private initializer for the log VFD + */ H5_DLL hid_t H5FD_log_init(void); /** diff --git a/src/H5FDmirror.h b/src/H5FDmirror.h index b196b2b1adc..6c98e1a8a6f 100644 --- a/src/H5FDmirror.h +++ b/src/H5FDmirror.h @@ -11,7 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: Public, shared definitions for Mirror VFD & remote Writer. 
+ * Purpose: The public header file for the mirror virtual file driver (VFD) */ #ifndef H5FDmirror_H @@ -19,7 +19,10 @@ #ifdef H5_HAVE_MIRROR_VFD -#define H5FD_MIRROR (H5FDperform_init(H5FD_mirror_init)) +/** Initializer for the mirror VFD */ +#define H5FD_MIRROR (H5FDperform_init(H5FD_mirror_init)) + +/** Identifier for the mirror VFD */ #define H5FD_MIRROR_VALUE H5_VFD_MIRROR #ifdef __cplusplus @@ -62,6 +65,10 @@ typedef struct H5FD_mirror_fapl_t { char remote_ip[H5FD_MIRROR_MAX_IP_LEN + 1]; } H5FD_mirror_fapl_t; +/** @private + * + * \brief Private initializer for the mirror VFD + */ H5_DLL hid_t H5FD_mirror_init(void); /** diff --git a/src/H5FDmpio.h b/src/H5FDmpio.h index 60deec2c07b..5e7ecf30353 100644 --- a/src/H5FDmpio.h +++ b/src/H5FDmpio.h @@ -11,35 +11,42 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the mpio driver. + * Purpose: The public header file for the MPI-I/O (mpio) virtual file driver (VFD) */ #ifndef H5FDmpio_H #define H5FDmpio_H -/* Macros */ - #ifdef H5_HAVE_PARALLEL + +/** Initializer for the mpio VFD */ #define H5FD_MPIO (H5FDperform_init(H5FD_mpio_init)) + #else + +/** Initializer for the mpio VFD (disabled) */ #define H5FD_MPIO (H5I_INVALID_HID) -#endif /* H5_HAVE_PARALLEL */ + +#endif #ifdef H5_HAVE_PARALLEL -/*Turn on H5FDmpio_debug if H5F_DEBUG is on */ -#ifdef H5F_DEBUG -#ifndef H5FDmpio_DEBUG + +#if defined(H5F_DEBUG) && !defined(H5FDmpio_DEBUG) +/** Turn mpio VFD debugging on (requires H5F_DEBUG) */ #define H5FDmpio_DEBUG #endif -#endif /* Global var whose value comes from environment variable */ /* (Defined in H5FDmpio.c) */ H5_DLLVAR hbool_t H5FD_mpi_opt_types_g; -/* Function prototypes */ #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the mpio VFD + */ H5_DLL hid_t H5FD_mpio_init(void); /** diff --git a/src/H5FDmulti.h b/src/H5FDmulti.h index a85f2dfed25..d89a3e27cce 100644 --- a/src/H5FDmulti.h +++ b/src/H5FDmulti.h @@ -11,16 +11,22 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the "multi" driver. + * Purpose: The public header file for the multi virtual file driver (VFD) */ #ifndef H5FDmulti_H #define H5FDmulti_H +/** Initializer for the multi VFD */ #define H5FD_MULTI (H5FDperform_init(H5FD_multi_init)) #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the multi VFD + */ H5_DLL hid_t H5FD_multi_init(void); /** diff --git a/src/H5FDonion.h b/src/H5FDonion.h index 09b290e2a9e..4aaab6d3c3e 100644 --- a/src/H5FDonion.h +++ b/src/H5FDonion.h @@ -11,19 +11,18 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Onion Virtual File Driver (VFD) - * - * Purpose: The public header file for the Onion VFD. + * Purpose: The public header file for the onion virtual file driver (VFD) */ #ifndef H5FDonion_H #define H5FDonion_H -#define H5FD_ONION (H5FDperform_init(H5FD_onion_init)) +/** Initializer for the onion VFD */ +#define H5FD_ONION (H5FDperform_init(H5FD_onion_init)) + +/** Identifier for the onion VFD */ #define H5FD_ONION_VALUE H5_VFD_ONION -/** - * Current version of the onion VFD fapl info struct. 
- */ +/** Current version of the onion VFD fapl info struct */ #define H5FD_ONION_FAPL_INFO_VERSION_CURR 1 #define H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT \ @@ -114,6 +113,10 @@ typedef struct H5FD_onion_fapl_info_t { extern "C" { #endif +/** @private + * + * \brief Private initializer for the onion VFD + */ H5_DLL hid_t H5FD_onion_init(void); /** diff --git a/src/H5FDros3.h b/src/H5FDros3.h index ecd26789cc8..217af2d01b6 100644 --- a/src/H5FDros3.h +++ b/src/H5FDros3.h @@ -11,20 +11,24 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Read-Only S3 Virtual File Driver (VFD) - * - * Purpose: The public header file for the ros3 driver. + * Purpose: The public header file for the read-only S3 (ros3) virtual file driver (VFD) */ #ifndef H5FDros3_H #define H5FDros3_H #ifdef H5_HAVE_ROS3_VFD -#define H5FD_ROS3 (H5FDperform_init(H5FD_ros3_init)) +/** Initializer for the ros3 VFD */ +#define H5FD_ROS3 (H5FDperform_init(H5FD_ros3_init)) + +/** Identifier for the ros3 VFD */ #define H5FD_ROS3_VALUE H5_VFD_ROS3 #else +/** Initializer for the ros3 VFD (disabled) */ #define H5FD_ROS3 (H5I_INVALID_HID) + +/** Identifier for the ros3 VFD (disabled) */ #define H5FD_ROS3_VALUE H5_VFD_INVALID -#endif /* H5_HAVE_ROS3_VFD */ +#endif #ifdef H5_HAVE_ROS3_VFD @@ -139,9 +143,9 @@ typedef struct H5FD_ros3_fapl_t { extern "C" { #endif -/** - * \brief Internal routine to initialize #H5FD_ROS3 driver. Not meant to be - * called directly by an HDF5 application. +/** @private + * + * \brief Private initializer for the ros3 VFD */ H5_DLL hid_t H5FD_ros3_init(void); diff --git a/src/H5FDsec2.h b/src/H5FDsec2.h index a2590aee968..dd0a4d8918d 100644 --- a/src/H5FDsec2.h +++ b/src/H5FDsec2.h @@ -11,18 +11,26 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the sec2 driver + * Purpose: The public header file for the POSOX I/O (sec2 - "POSIX section 2") + * virtual file driver (VFD) */ #ifndef H5FDsec2_H #define H5FDsec2_H -#define H5FD_SEC2 (H5FDperform_init(H5FD_sec2_init)) +/** Initializer for the sec2 VFD */ +#define H5FD_SEC2 (H5FDperform_init(H5FD_sec2_init)) + +/** Identifier for the sec2 VFD */ #define H5FD_SEC2_VALUE H5_VFD_SEC2 #ifdef __cplusplus extern "C" { #endif +/** @private + * + * \brief Private initializer for the sec2 VFD + */ H5_DLL hid_t H5FD_sec2_init(void); /** @@ -38,7 +46,6 @@ H5_DLL hid_t H5FD_sec2_init(void); * #H5FD_SEC2 driver. * * \since 1.4.0 - * */ H5_DLL herr_t H5Pset_fapl_sec2(hid_t fapl_id); diff --git a/src/H5FDsplitter.h b/src/H5FDsplitter.h index 8e790e212b6..99a471e5ce3 100644 --- a/src/H5FDsplitter.h +++ b/src/H5FDsplitter.h @@ -11,13 +11,16 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the "splitter" driver. 
+ * Purpose: The public header file for the splitter virtual file driver (VFD) */ #ifndef H5FDsplitter_H #define H5FDsplitter_H -#define H5FD_SPLITTER (H5FDperform_init(H5FD_splitter_init)) +/** Initializer for the splitter VFD */ +#define H5FD_SPLITTER (H5FDperform_init(H5FD_splitter_init)) + +/** Identifier for the splitter VFD */ #define H5FD_SPLITTER_VALUE H5_VFD_SPLITTER /** The version of the H5FD_splitter_vfd_config_t structure used */ @@ -55,6 +58,11 @@ typedef struct H5FD_splitter_vfd_config_t { #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the splitter VFD + */ H5_DLL hid_t H5FD_splitter_init(void); /** diff --git a/src/H5FDstdio.h b/src/H5FDstdio.h index e2e05a77d64..794fe31bf61 100644 --- a/src/H5FDstdio.h +++ b/src/H5FDstdio.h @@ -11,20 +11,26 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the C stdio driver + * Purpose: The public header file for the C stdio virtual file driver (VFD) */ #ifndef H5FDstdio_H #define H5FDstdio_H #include "H5Ipublic.h" +/** Initializer for the stdio VFD */ #define H5FD_STDIO (H5FDperform_init(H5FD_stdio_init)) #ifdef __cplusplus extern "C" { #endif +/** @private + * + * \brief Private initializer for the stdio VFD + */ H5_DLL hid_t H5FD_stdio_init(void); + /** * \ingroup FAPL * @@ -34,7 +40,11 @@ H5_DLL hid_t H5FD_stdio_init(void); * \returns \herr_t * * \details H5Pset_fapl_stdio() modifies the file access property list to use - * the standard I/O driver, H5FDstdio(). + * the stdio VFD, which uses I/O calls from stdio.h. + * + * \note This VFD was designed to be a "demo" VFD that shows how to write + * your own VFD. Most applications should not use this VFD and should instead + * use the POSIX I/O VFD (sec2). * * \since 1.4.0 * diff --git a/src/H5FDwindows.h b/src/H5FDwindows.h index 14f698580f8..673d1c93b59 100644 --- a/src/H5FDwindows.h +++ b/src/H5FDwindows.h @@ -11,11 +11,16 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the Windows driver + * Purpose: The public header file for the Windows virtual file driver (VFD) + * + * This VFD uses no Win32 API calls directly (though it may be + * rewritten to do so in the future). It is currently defined to + * be the sec2 VFD. 
*/ #ifndef H5FDwindows_H #define H5FDwindows_H +/** Initializer for the Windows VFD */ #define H5FD_WINDOWS (H5FD_sec2_init()) #ifdef __cplusplus From 44ec53ec485029fc964d67c811c5d3e227f99372 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:11:07 -0500 Subject: [PATCH 071/101] Remove CDASH_LOCAL variable reference (#3796) --- CMakeInstallation.cmake | 6 +++--- CTestConfig.cmake | 12 ++---------- config/cmake/LIBAEC/CMakeLists.txt | 6 +++--- config/cmake/ZLIB/CMakeLists.txt | 6 +++--- config/cmake/examples/HDF5_Examples.cmake.in | 8 -------- 5 files changed, 11 insertions(+), 27 deletions(-) diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index bb244764de2..74ab8b15fd2 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -295,10 +295,10 @@ endif () if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES) set (CPACK_PACKAGE_VENDOR "HDF_Group") set (CPACK_PACKAGE_NAME "${HDF5_PACKAGE_NAME}") - if (CDASH_LOCAL) - set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION}") - else () + if (NOT WIN32 OR HDF5_VERS_SUBRELEASE MATCHES "^[0-9]+$") set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION_STRING}") + else () + set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION}") endif () set (CPACK_PACKAGE_VERSION_MAJOR "${HDF5_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${HDF5_PACKAGE_VERSION_MINOR}") diff --git a/CTestConfig.cmake b/CTestConfig.cmake index b780b86edae..62beafc0c12 100644 --- a/CTestConfig.cmake +++ b/CTestConfig.cmake @@ -22,20 +22,12 @@ set (CTEST_DROP_METHOD "https") if (CTEST_DROP_SITE_INIT) set (CTEST_DROP_SITE "${CTEST_DROP_SITE_INIT}") else () - if (CDASH_LOCAL) - set (CTEST_DROP_SITE "cdash-internal.hdfgroup.org") - else () - set (CTEST_DROP_SITE "cdash.hdfgroup.org") - endif () + set (CTEST_DROP_SITE "cdash.hdfgroup.org") endif () if (CTEST_DROP_LOCATION_INIT) set (CTEST_DROP_LOCATION "${CTEST_DROP_LOCATION_INIT}") else () - if (CDASH_LOCAL) - set (CTEST_DROP_LOCATION "/submit.php?project=HDF5Trunk") - else () - set (CTEST_DROP_LOCATION "/submit.php?project=HDF5") - endif () + set (CTEST_DROP_LOCATION "/submit.php?project=HDF5") endif () set (CTEST_DROP_SITE_CDASH TRUE) diff --git a/config/cmake/LIBAEC/CMakeLists.txt b/config/cmake/LIBAEC/CMakeLists.txt index 53950d8942a..9ffb68d28b7 100644 --- a/config/cmake/LIBAEC/CMakeLists.txt +++ b/config/cmake/LIBAEC/CMakeLists.txt @@ -369,10 +369,10 @@ configure_file (${LIBAEC_SOURCE_DIR}/README.md ${LIBAEC_BINARY_DIR}/LIBAEC_READM if (NOT LIBAEC_EXTERNALLY_CONFIGURED) set (CPACK_PACKAGE_VENDOR "HDF_Group") set (CPACK_PACKAGE_NAME "${LIBAEC_PACKAGE_NAME}") - if (CDASH_LOCAL) - set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION}") - else () + if (NOT WIN32 OR LIBAEC_VERS_SUBRELEASE MATCHES "^[0-9]+$") set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION_STRING}") + else () + set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION}") endif () set (CPACK_PACKAGE_VERSION_MAJOR "${LIBAEC_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${LIBAEC_PACKAGE_VERSION_MINOR}") diff --git a/config/cmake/ZLIB/CMakeLists.txt b/config/cmake/ZLIB/CMakeLists.txt index 12411ac9e07..5c06a544ce2 100644 --- a/config/cmake/ZLIB/CMakeLists.txt +++ b/config/cmake/ZLIB/CMakeLists.txt @@ -428,10 +428,10 @@ configure_file (${ZLIB_SOURCE_DIR}/README ${ZLIB_BINARY_DIR}/ZLIB_README @ONLY) if (NOT ZLIB_EXTERNALLY_CONFIGURED) set (CPACK_PACKAGE_VENDOR "HDF_Group") set (CPACK_PACKAGE_NAME "${ZLIB_PACKAGE_NAME}") - if (CDASH_LOCAL) - set (CPACK_PACKAGE_VERSION 
"${ZLIB_PACKAGE_VERSION}") - else () + if (NOT WIN32 OR ZLIB_VERS_SUBRELEASE MATCHES "^[0-9]+$") set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION_STRING}") + else () + set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION}") endif () set (CPACK_PACKAGE_VERSION_MAJOR "${ZLIB_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${ZLIB_PACKAGE_VERSION_MINOR}") diff --git a/config/cmake/examples/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in index d77c16d49ec..f45e1ed6dbe 100644 --- a/config/cmake/examples/HDF5_Examples.cmake.in +++ b/config/cmake/examples/HDF5_Examples.cmake.in @@ -62,11 +62,6 @@ if(NOT DEFINED CTEST_SOURCE_NAME) set(CTEST_SOURCE_NAME "HDF5Examples") endif() -if(NOT DEFINED HDF_LOCAL) - set(CDASH_LOCAL "NO") -else() - set(CDASH_LOCAL "YES") -endif() if(NOT DEFINED CTEST_SITE) set(CTEST_SITE "local") endif() @@ -100,9 +95,6 @@ else() endif() ### default HDF5_PLUGIN_PATH to where the filter libraries are located set(ENV{HDF5_PLUGIN_PATH} "${INSTALLDIR}/lib/plugin") -if(${CDASH_LOCAL}) - set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCDASH_LOCAL:BOOL=ON") -endif() set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@") ############################################################################################################### From 87b3e7c8211d9c587589f4d8d2965efb119fbf6d Mon Sep 17 00:00:00 2001 From: Robert Adam Date: Mon, 30 Oct 2023 18:47:06 +0100 Subject: [PATCH 072/101] Don't build util tests when HDF5_EXTERNALLY_CONFIGURED=ON (#3781) Fixes #3780 --- c++/CMakeLists.txt | 2 +- fortran/CMakeLists.txt | 2 +- hl/CMakeLists.txt | 2 +- java/CMakeLists.txt | 2 +- tools/CMakeLists.txt | 2 +- utils/CMakeLists.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/c++/CMakeLists.txt b/c++/CMakeLists.txt index 2c0275bc586..b419c805880 100644 --- a/c++/CMakeLists.txt +++ b/c++/CMakeLists.txt @@ -13,6 +13,6 @@ endif () #----------------------------------------------------------------------------- # Build the CPP unit tests #----------------------------------------------------------------------------- -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) endif () diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index cf9b427b3a6..efaf963c108 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -22,7 +22,7 @@ endif () #----------------------------------------------------------------------------- # Testing #----------------------------------------------------------------------------- -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) if (MPI_Fortran_FOUND) add_subdirectory (testpar) diff --git a/hl/CMakeLists.txt b/hl/CMakeLists.txt index a777b72f540..45a9a22dded 100644 --- a/hl/CMakeLists.txt +++ b/hl/CMakeLists.txt @@ -26,6 +26,6 @@ if (HDF5_BUILD_EXAMPLES) endif () #-- Build the Unit testing if requested -if (BUILD_TESTING AND HDF5_TEST_SERIAL) +if (HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING AND HDF5_TEST_SERIAL) add_subdirectory (test) endif () diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt index ae37ceb9ad8..4965f2c9584 100644 --- a/java/CMakeLists.txt +++ b/java/CMakeLists.txt @@ -49,7 +49,7 @@ endif () #----------------------------------------------------------------------------- # Testing #----------------------------------------------------------------------------- -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory 
(test) endif () diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 55f2c2a576f..91c57c9cf08 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -7,7 +7,7 @@ add_subdirectory (lib) add_subdirectory (src) #-- Add the tests -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) # -------------------------------------------------------------------- diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 718f88e1907..b7e4630c07e 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_UTILS C) -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) endif () From e3f060852fdd1036453a715697effdf61c73b360 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Tue, 31 Oct 2023 08:04:54 -0500 Subject: [PATCH 073/101] Fix h5py CI failure. (#3805) --- .github/workflows/h5py.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/h5py.yml b/.github/workflows/h5py.yml index 10958441ab1..316a71d99b1 100644 --- a/.github/workflows/h5py.yml +++ b/.github/workflows/h5py.yml @@ -11,10 +11,11 @@ jobs: build: runs-on: ubuntu-latest steps: - - name: Install gfortran - run: | - sudo apt-get update - sudo apt-get install -y gfortran-12 + - name: Install Fortran + uses: fortran-lang/setup-fortran@v1 + with: + compiler: gcc + version: 13 - name: Checkout Spack uses: actions/checkout@v4 with: From 8df0008f0718d1152f41c66447f77934a0de66af Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 31 Oct 2023 08:05:12 -0500 Subject: [PATCH 074/101] Fix memory corruption in 'MPI I/O FAPL preserve' test (#3806) --- testpar/t_file.c | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/testpar/t_file.c b/testpar/t_file.c index ce55270cdd2..8f8b2914a70 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -1128,28 +1128,27 @@ test_evict_on_close_parallel_unsupp(void) void test_fapl_preserve_hints(void) { - hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ - hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ const char *filename; - - int nkeys_used; - bool same = false; - - MPI_Info info = MPI_INFO_NULL; - const char *key = "hdf_info_fapl"; - const char *value = "xyz"; - - MPI_Info info_used = MPI_INFO_NULL; - int flag = -1; - char value_used[20]; - char key_used[20]; - - int i; - herr_t ret; /* Generic return value */ - int mpi_ret; /* MPI return value */ + const char *key = "hdf_info_fapl"; + const char *value = "xyz"; + MPI_Info info_used = MPI_INFO_NULL; + MPI_Info info = MPI_INFO_NULL; + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + char key_used[MPI_MAX_INFO_KEY + 1]; + char *value_used = NULL; + bool same = false; + int flag = -1; + int nkeys_used; + int i; + int mpi_ret; /* MPI return value */ + herr_t ret; /* Generic return value */ filename = (const char *)GetTestParameters(); + value_used = malloc(MPI_MAX_INFO_VAL + 1); + VRFY(value_used, "malloc succeeded"); + /* set up MPI parameters */ mpi_ret = MPI_Info_create(&info); VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); @@ -1184,16 +1183,15 @@ test_fapl_preserve_hints(void) for (i = 0; i < nkeys_used; i++) { /* Memset the buffers to zero */ - memset(key_used, 0, 20); - memset(value_used, 0, 20); + memset(key_used, 0, MPI_MAX_INFO_KEY + 1); + memset(value_used, 0, MPI_MAX_INFO_VAL + 1); /* Get the nth key */ mpi_ret 
= MPI_Info_get_nthkey(info_used, i, key_used); VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get_nthkey succeeded"); if (!strcmp(key_used, key)) { - - mpi_ret = MPI_Info_get(info_used, key_used, 20, value_used, &flag); + mpi_ret = MPI_Info_get(info_used, key_used, MPI_MAX_INFO_VAL, value_used, &flag); VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get succeeded"); if (!strcmp(value_used, value)) { @@ -1220,4 +1218,6 @@ test_fapl_preserve_hints(void) mpi_ret = MPI_Info_free(&info_used); VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + free(value_used); + } /* end test_fapl_preserve_hints() */ From ea3b6fd12b4fb50c1d891e1408e99060b8afacd1 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 31 Oct 2023 08:05:25 -0500 Subject: [PATCH 075/101] Fix usage of h5_clean_files in t_pflush2.c (#3807) --- testpar/t_pflush2.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c index 95ad1257801..e1dce1bbfd7 100644 --- a/testpar/t_pflush2.c +++ b/testpar/t_pflush2.c @@ -20,7 +20,8 @@ #include "h5test.h" -static const char *FILENAME[] = {"flush", "noflush", NULL}; +static const char *FLUSH_FILENAME[] = {"flush", NULL}; +static const char *NOFLUSH_FILENAME[] = {"noflush", NULL}; static int *data_g = NULL; @@ -173,7 +174,7 @@ main(int argc, char *argv[]) goto error; /* Check the case where the file was flushed */ - h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name)); + h5_fixname(FLUSH_FILENAME[0], fapl_id1, name, sizeof(name)); if (check_test_file(name, sizeof(name), fapl_id1)) { H5_FAILED(); goto error; @@ -190,7 +191,7 @@ main(int argc, char *argv[]) H5Eget_auto2(H5E_DEFAULT, &func, NULL); H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name)); + h5_fixname(NOFLUSH_FILENAME[0], fapl_id2, name, sizeof(name)); if (check_test_file(name, sizeof(name), fapl_id2)) { if (mpi_rank == 0) PASSED(); @@ -202,8 +203,8 @@ main(int argc, char *argv[]) H5Eset_auto2(H5E_DEFAULT, func, NULL); - h5_clean_files(&FILENAME[0], fapl_id1); - h5_clean_files(&FILENAME[1], fapl_id2); + h5_clean_files(FLUSH_FILENAME, fapl_id1); + h5_clean_files(NOFLUSH_FILENAME, fapl_id2); if (data_g) { free(data_g); From ebc1651425e6052824bae82b2a8016f9a8af4ff2 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 31 Oct 2023 08:05:40 -0500 Subject: [PATCH 076/101] Fix parallel driver check in h5_fixname_real (#3808) --- test/h5test.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/test/h5test.c b/test/h5test.c index 304637580f6..ef580cf3072 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -457,7 +457,7 @@ h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fu const char *suffix = _suffix; size_t i, j; hid_t driver = H5I_INVALID_HID; - int isppdriver = 0; /* if the driver is MPI parallel */ + bool isppdriver = false; /* if the driver is MPI parallel */ if (!base_name || !fullname || size < 1) return NULL; @@ -516,10 +516,8 @@ h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fu } } - /* Must first check fapl is not H5P_DEFAULT (-1) because H5FD_XXX - * could be of value -1 if it is not defined. - */ - isppdriver = ((H5P_DEFAULT != fapl) || driver_env_var) && (H5FD_MPIO == driver); + if (h5_using_parallel_driver(fapl, &isppdriver) < 0) + return NULL; /* Check HDF5_NOCLEANUP environment setting. 
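The test fix above sizes each hint buffer for the maximum key/value length plus a terminating NUL and passes MPI_Info_get a length that excludes the terminator. A minimal standalone sketch of that pattern (hypothetical helper; error checking omitted):

#include <mpi.h>
#include <stdio.h>

/* Minimal sketch of bounded MPI hint retrieval; error checking omitted. */
static void
print_hints(MPI_Info info)
{
    char key[MPI_MAX_INFO_KEY + 1];   /* +1 for the NUL terminator */
    char value[MPI_MAX_INFO_VAL + 1]; /* +1 for the NUL terminator */
    int  nkeys = 0;

    MPI_Info_get_nkeys(info, &nkeys);
    for (int i = 0; i < nkeys; i++) {
        int flag = 0;

        MPI_Info_get_nthkey(info, i, key);
        /* The length argument does not count the terminating NUL */
        MPI_Info_get(info, key, MPI_MAX_INFO_VAL, value, &flag);
        if (flag)
            printf("%s = %s\n", key, value);
    }
}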
* (The #ifdef is needed to prevent compile failure in case MPI is not From 39c0284c38395aac9eb43364b14931fc2826b7f6 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 31 Oct 2023 08:05:52 -0500 Subject: [PATCH 077/101] Fix a couple usages of MPI_Info_get (#3809) --- src/H5FDmpio.c | 4 ++-- src/H5mpi.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index 7141550f40a..83a5ad45a9a 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -906,7 +906,7 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR /* copy over each hint */ for (i = 0; i < nkeys; i++) { - char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL]; + char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL + 1]; int valuelen, flag; /* retrieve the nth hint */ @@ -916,7 +916,7 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR if (MPI_SUCCESS != (mpi_code = MPI_Info_get_valuelen(info_used, key, &valuelen, &flag))) HMPI_GOTO_ERROR(NULL, "MPI_Info_get_valuelen failed", mpi_code) /* retrieve the value of nth hint */ - if (MPI_SUCCESS != (mpi_code = MPI_Info_get(info_used, key, valuelen + 1, value, &flag))) + if (MPI_SUCCESS != (mpi_code = MPI_Info_get(info_used, key, valuelen, value, &flag))) HMPI_GOTO_ERROR(NULL, "MPI_Info_get failed", mpi_code) /* copy the hint into info */ diff --git a/src/H5mpi.c b/src/H5mpi.c index 2725ec5bf19..cf7e33d46c9 100644 --- a/src/H5mpi.c +++ b/src/H5mpi.c @@ -380,9 +380,9 @@ H5_mpi_info_cmp(MPI_Info info1, MPI_Info info2, int *result) /* Allocate buffers for iteration */ if (NULL == (key = (char *)H5MM_malloc(MPI_MAX_INFO_KEY * sizeof(char)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); - if (NULL == (value1 = (char *)H5MM_malloc(MPI_MAX_INFO_VAL * sizeof(char)))) + if (NULL == (value1 = (char *)H5MM_malloc((MPI_MAX_INFO_VAL + 1) * sizeof(char)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); - if (NULL == (value2 = (char *)H5MM_malloc(MPI_MAX_INFO_VAL * sizeof(char)))) + if (NULL == (value2 = (char *)H5MM_malloc((MPI_MAX_INFO_VAL + 1) * sizeof(char)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); /* Iterate over the keys, comparing them */ From e9e4ffaec73d74075426f61ecdd9e9ea35fa6f50 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Wed, 1 Nov 2023 07:12:03 -0500 Subject: [PATCH 078/101] Remove H5system.c warning on Windows oneAPI. (#3812) --- src/H5system.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/H5system.c b/src/H5system.c index 30a89a174af..be886ae52f3 100644 --- a/src/H5system.c +++ b/src/H5system.c @@ -807,13 +807,12 @@ H5_nanosleep(uint64_t nanosec) #ifdef H5_HAVE_WIN32_API DWORD dwMilliseconds = (DWORD)ceil(nanosec / 1.0e6); - DWORD ignore; /* Windows can't sleep at a ns resolution. Best we can do is ~1 ms. We * don't care about the return value since the second parameter * (bAlertable) is false, so it will always be zero. 
*/ - ignore = SleepEx(dwMilliseconds, false); + SleepEx(dwMilliseconds, false); #else From f8ab865ce75821a2e24127b1d52ce3070980c298 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 1 Nov 2023 07:13:22 -0500 Subject: [PATCH 079/101] Add processing of NVHPC flags in linux-gnulibc1 file (#3804) * Disable testing as tests are failing the same as in CMake --- .github/workflows/nvhpc-auto.yml | 10 +++++----- config/linux-gnulibc1 | 25 +++++++++++++++++++++++-- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/.github/workflows/nvhpc-auto.yml b/.github/workflows/nvhpc-auto.yml index 3e3a323fe1e..2a97ba1fd21 100644 --- a/.github/workflows/nvhpc-auto.yml +++ b/.github/workflows/nvhpc-auto.yml @@ -67,11 +67,11 @@ jobs: # RUN TESTS # NORMAL - - name: Autotools Run Tests - run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH - make check -j - working-directory: ${{ runner.workspace }}/build +# - name: Autotools Run Tests +# run: | +# export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH +# make check -j +# working-directory: ${{ runner.workspace }}/build # INSTALL (note that this runs even when we don't run the tests) - name: Autotools Install diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 index 328f8d3cec3..92f2be63df5 100644 --- a/config/linux-gnulibc1 +++ b/config/linux-gnulibc1 @@ -47,6 +47,9 @@ fi # Figure out Clang C compiler flags . $srcdir/config/clang-flags +# Figure out NVHPC C compiler flags +. $srcdir/config/nvidia-flags + # Use default Fortran 90 compiler according to what C compiler is used. if test "X-" = "X-$FC"; then case $CC_BASENAME in @@ -58,6 +61,10 @@ if test "X-" = "X-$FC"; then FC=pgf90 FC_BASENAME=pgf90 ;; + nvc*) + FC=nvfortran + FC_BASENAME=nvfortran + ;; icx*) FC=ifx FC_BASENAME=ifx @@ -79,7 +86,7 @@ if test "X-" = "X-$FC"; then else case $FC in # The PGI and Intel compilers are automatically detected below - ifc*|ifort*|pgf90*) + ifc*|ifort*|pgf90*|nvfortran*) ;; *f95*) @@ -135,6 +142,9 @@ fi # Figure out Clang FC compiler flags . $srcdir/config/clang-fflags +# Figure out NVHPC FC compiler flags +. $srcdir/config/nvidia-fflags + case $FC_BASENAME in # # Absoft compiler @@ -226,6 +236,9 @@ fi # Figure out Clang CXX compiler flags . $srcdir/config/clang-cxxflags +# Figure out NVHPC CXX compiler flags +. 
$srcdir/config/nvidia-cxxflags + # compiler version strings # check if the compiler_version_info is already set @@ -248,7 +261,11 @@ case $CC in sed 's/\"/\\\"/g' |\ sed 's/^\([a-z]* \)/ built with \1/1'` cc_version_info=`echo $cc_version_info` - ;; + ;; + + *nvc*) + cc_version_info=`$CC $CFLAGS $H5_CFLAGS -V 2>&1 | grep 'nvc'` + ;; *icx*) cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ @@ -322,6 +339,10 @@ case $FC in fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'pgf90'` ;; + *nvfortran*) + fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'nvfortran'` + ;; + *nagfor*|*nagftn*) RM='rm -f' tmpfile=/tmp/cmpver.$$ From a654b2c1c5670010e14d8fba6d287ce62c87169a Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Wed, 1 Nov 2023 07:15:19 -0500 Subject: [PATCH 080/101] Use the current toolchain for examples as default (#3810) --- CMakeInstallation.cmake | 4 ++++ config/cmake/examples/HDF5_Examples.cmake.in | 7 +++++++ config/cmake/examples/HDF5_Examples_options.cmake | 13 +++++++++++++ config/cmake/scripts/HDF5options.cmake | 1 - 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index 74ab8b15fd2..02f8dc35058 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -141,6 +141,10 @@ install ( #----------------------------------------------------------------------------- option (HDF5_PACK_EXAMPLES "Package the HDF5 Library Examples Compressed File" OFF) if (HDF5_PACK_EXAMPLES) + if (DEFINED CMAKE_TOOLCHAIN_FILE) + get_filename_component(TOOLCHAIN ${CMAKE_TOOLCHAIN_FILE} NAME) + set(CTEST_TOOLCHAIN_FILE "\${CTEST_SOURCE_DIRECTORY}/config/toolchain/${TOOLCHAIN}") + endif () configure_file ( ${HDF_RESOURCES_DIR}/examples/HDF5_Examples.cmake.in ${HDF5_BINARY_DIR}/HDF5_Examples.cmake @ONLY diff --git a/config/cmake/examples/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in index f45e1ed6dbe..2f3a6491a90 100644 --- a/config/cmake/examples/HDF5_Examples.cmake.in +++ b/config/cmake/examples/HDF5_Examples.cmake.in @@ -29,6 +29,7 @@ set(CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY}) #INSTALLDIR - HDF5 root folder #CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo #CTEST_SOURCE_NAME - name of source folder; HDF5Examples +#CTEST_TOOLCHAIN_FILE - name and path in source of toolchain file if(DEFINED CTEST_SCRIPT_ARG) # transform ctest script arguments of the form # script.ctest,var1=value1,var2=value2 @@ -96,6 +97,12 @@ endif() ### default HDF5_PLUGIN_PATH to where the filter libraries are located set(ENV{HDF5_PLUGIN_PATH} "${INSTALLDIR}/lib/plugin") set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@") +### use a toolchain file (supported everywhere) #### +if(NOT DEFINED CTEST_TOOLCHAIN_FILE) + set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=@CTEST_TOOLCHAIN_FILE@") +else() + set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=${CTEST_TOOLCHAIN_FILE}") +endif() ############################################################################################################### # For any comments please contact cdashhelp@hdfgroup.org diff --git a/config/cmake/examples/HDF5_Examples_options.cmake b/config/cmake/examples/HDF5_Examples_options.cmake index cdd49eb13ad..684ec5bf641 100644 --- a/config/cmake/examples/HDF5_Examples_options.cmake +++ b/config/cmake/examples/HDF5_Examples_options.cmake @@ -28,6 +28,19 @@ ### build with shared 
libraries #set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=ON") +############################################################################################# +#### maximum parallel processor count for build and test #### +#set(MAX_PROC_COUNT 8) + +############################################################################################# +#### alternate toolsets (Windows usually) #### +#set(CMAKE_GENERATOR_TOOLSET "Intel C++ Compiler 17.0") + +############################################################################################# +### use a toolchain file (supported everywhere) #### +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/clang.cmake") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/intel.cmake") + ############################################################################################# #### languages #### ### disable C builds diff --git a/config/cmake/scripts/HDF5options.cmake b/config/cmake/scripts/HDF5options.cmake index 92bfd37ecbe..5d078461be9 100644 --- a/config/cmake/scripts/HDF5options.cmake +++ b/config/cmake/scripts/HDF5options.cmake @@ -26,7 +26,6 @@ ############################################################################################# ### use a toolchain file (supported everywhere) #### - #set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/intel.cmake") ############################################################################################# From 562c53c44a22c979419deb1cb025b2a74900fbac Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 1 Nov 2023 08:16:33 -0500 Subject: [PATCH 081/101] Fix misc. warnings from GCC when compiling with -fsanitize=undefined (#3787) --- src/H5Dmpio.c | 3 ++- src/H5FDfamily.c | 3 ++- src/H5FDlog.c | 2 +- src/H5FDsec2.c | 2 +- src/H5T.c | 6 +++--- tools/src/misc/h5repart.c | 8 ++++++++ 6 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index b40ab4b2012..b6976e6a067 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -911,7 +911,8 @@ H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len, c case H5D_MPIO_COLLECTIVE: case H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE: default: - assert(0 && "invalid no collective cause reason"); + cause_str = "invalid or unknown no collective cause reason"; + assert(0 && "invalid or unknown no collective cause reason"); break; } diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 94805a23a8b..3f43ae9cc5b 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -1427,7 +1427,8 @@ H5FD__family_delete(const char *filename, hid_t fapl_id) FUNC_ENTER_PACKAGE - assert(filename); + if (!filename) + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "invalid filename pointer"); /* Get the driver info (for the member fapl) * The family_open call accepts H5P_DEFAULT, so we'll accept that here, too. 
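The string-copy hunks that follow all apply the same defensive idiom: copy at most sizeof(buffer) - 1 bytes and then NUL-terminate explicitly, so the destination is a valid C string even when the source is longer than the buffer. A minimal standalone sketch (hypothetical helper):

#include <string.h>

/* Minimal sketch of the truncation-safe copy idiom used in the hunks below. */
static void
copy_bounded(char *dst, size_t dst_size, const char *src)
{
    strncpy(dst, src, dst_size - 1); /* leave room for the terminator */
    dst[dst_size - 1] = '\0';        /* always NUL-terminate */
}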
diff --git a/src/H5FDlog.c b/src/H5FDlog.c index 8d43dc836a6..e35a6a65c4a 100644 --- a/src/H5FDlog.c +++ b/src/H5FDlog.c @@ -545,7 +545,7 @@ H5FD__log_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr) #endif /* H5_HAVE_WIN32_API */ /* Retain a copy of the name used to open the file, for possible error reporting */ - strncpy(file->filename, name, sizeof(file->filename)); + strncpy(file->filename, name, sizeof(file->filename) - 1); file->filename[sizeof(file->filename) - 1] = '\0'; /* Get the flags for logging */ diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c index 29616842c15..15accf76d33 100644 --- a/src/H5FDsec2.c +++ b/src/H5FDsec2.c @@ -368,7 +368,7 @@ H5FD__sec2_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr } /* Retain a copy of the name used to open the file, for possible error reporting */ - strncpy(file->filename, name, sizeof(file->filename)); + strncpy(file->filename, name, sizeof(file->filename) - 1); file->filename[sizeof(file->filename) - 1] = '\0'; /* Check for non-default FAPL */ diff --git a/src/H5T.c b/src/H5T.c index ef94925982f..a02abfc18d0 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -2501,7 +2501,7 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con H5T_g.asoft = na; H5T_g.soft = x; } /* end if */ - strncpy(H5T_g.soft[H5T_g.nsoft].name, name, (size_t)H5T_NAMELEN); + strncpy(H5T_g.soft[H5T_g.nsoft].name, name, (size_t)H5T_NAMELEN - 1); H5T_g.soft[H5T_g.nsoft].name[H5T_NAMELEN - 1] = '\0'; H5T_g.soft[H5T_g.nsoft].src = src->shared->type; H5T_g.soft[H5T_g.nsoft].dst = dst->shared->type; @@ -2550,7 +2550,7 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con /* Create a new conversion path */ if (NULL == (new_path = H5FL_CALLOC(H5T_path_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); - strncpy(new_path->name, name, (size_t)H5T_NAMELEN); + strncpy(new_path->name, name, (size_t)H5T_NAMELEN - 1); new_path->name[H5T_NAMELEN - 1] = '\0'; if (NULL == (new_path->src = H5T_copy(old_path->src, H5T_COPY_ALL)) || NULL == (new_path->dst = H5T_copy(old_path->dst, H5T_COPY_ALL))) @@ -4953,7 +4953,7 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co if (NULL == (path = H5FL_CALLOC(H5T_path_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path"); if (name && *name) { - strncpy(path->name, name, (size_t)H5T_NAMELEN); + strncpy(path->name, name, (size_t)H5T_NAMELEN - 1); path->name[H5T_NAMELEN - 1] = '\0'; } /* end if */ else diff --git a/tools/src/misc/h5repart.c b/tools/src/misc/h5repart.c index feb447f7e09..12b293273bc 100644 --- a/tools/src/misc/h5repart.c +++ b/tools/src/misc/h5repart.c @@ -227,6 +227,10 @@ main(int argc, char *argv[]) if (argno >= argc) usage(prog_name); src_gen_name = argv[argno++]; + if (!src_gen_name) { + fprintf(stderr, "invalid source file name pointer"); + exit(EXIT_FAILURE); + } snprintf(src_name, NAMELEN, src_gen_name, src_membno); src_is_family = strcmp(src_name, src_gen_name); @@ -249,6 +253,10 @@ main(int argc, char *argv[]) if (argno >= argc) usage(prog_name); dst_gen_name = argv[argno++]; + if (!dst_gen_name) { + fprintf(stderr, "invalid destination file name pointer"); + exit(EXIT_FAILURE); + } snprintf(dst_name, NAMELEN, dst_gen_name, dst_membno); dst_is_family = strcmp(dst_name, dst_gen_name); From 64e239c4b6adcf597ac5ada6e7f03cbff126e833 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Wed, 1 Nov 2023 14:41:46 -0500 Subject: [PATCH 082/101] 
Set NVHPC maximum optimization level to -O1 for now (#3800) * Set NVHPC maximum optimization level to -O1 for now Compiling HDF5 with NVHPC 23.5 - 23.9 results in test failures in 4 different test files that need to be resolved. Since those tests pass with an optimization level of -O1 (and -O0) and it is currently unclear whether the test failures are due to issues in HDF5 or issues in the 'nvc' compiler, set the maximum optimization level for NVHPC to -O1 until the test failures are resolved. * Disable nvhpc Java testing in CMake and amend known issues * Re-enable testing of Autotools nvhpc --- .github/workflows/nvhpc-auto.yml | 10 ++++---- .github/workflows/nvhpc-cmake.yml | 12 ++++----- config/cmake/HDFCompilerFlags.cmake | 38 +++++++++++++++++++++++++++++ config/nvidia-flags | 6 +++-- release_docs/RELEASE.txt | 22 +++++++++++++++++ 5 files changed, 75 insertions(+), 13 deletions(-) diff --git a/.github/workflows/nvhpc-auto.yml b/.github/workflows/nvhpc-auto.yml index 2a97ba1fd21..3e3a323fe1e 100644 --- a/.github/workflows/nvhpc-auto.yml +++ b/.github/workflows/nvhpc-auto.yml @@ -67,11 +67,11 @@ jobs: # RUN TESTS # NORMAL -# - name: Autotools Run Tests -# run: | -# export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH -# make check -j -# working-directory: ${{ runner.workspace }}/build + - name: Autotools Run Tests + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + make check -j + working-directory: ${{ runner.workspace }}/build # INSTALL (note that this runs even when we don't run the tests) - name: Autotools Install diff --git a/.github/workflows/nvhpc-cmake.yml b/.github/workflows/nvhpc-cmake.yml index 489c0bbf3fb..e4a1454f215 100644 --- a/.github/workflows/nvhpc-cmake.yml +++ b/.github/workflows/nvhpc-cmake.yml @@ -56,7 +56,7 @@ jobs: -DLIBAEC_USE_LOCALCONTENT=OFF \ -DZLIB_USE_LOCALCONTENT=OFF \ -DHDF5_BUILD_FORTRAN:BOOL=ON \ - -DHDF5_ENABLE_ASSERTS:BOOL=ON \ + -DHDF5_BUILD_JAVA:BOOL=OFF \ -DMPIEXEC_MAX_NUMPROCS:STRING="2" \ $GITHUB_WORKSPACE cat src/libhdf5.settings @@ -69,8 +69,8 @@ jobs: working-directory: ${{ runner.workspace }}/build # RUN TESTS -# - name: CMake Run Tests -# shell: bash -# run: | -# ctest . --parallel 2 -C ${{ inputs.build_mode }} -V -# working-directory: ${{ runner.workspace }}/build + - name: CMake Run Tests + shell: bash + run: | + ctest . --parallel 2 -C ${{ inputs.build_mode }} -V + working-directory: ${{ runner.workspace }}/build diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index e7b9337f39c..7bddad0f776 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -56,6 +56,44 @@ if (CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) else () set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g") endif () + + # With at least NVHPC 23.5 - 23.9, compiling with -O2 or higher and -DNDEBUG + # appears to have issues that manifest in the tests as incorrect metadata + # checksums being read or memory being corrupted. Compiling without -DNDEBUG + # does not appear to have these issues, but is not ideal due to compiling in + # asserts and other library debug code. Compiling with -O1 also does not appear + # to have these issues, so set maximum optimization level to -O1 for now until + # it can be determined whether these issues are compiler-specific or issues + # in the library. 
+ set (cmake_c_flags_minsizerel_edited "${CMAKE_C_FLAGS_MINSIZEREL}") + string (REPLACE "-O2" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-O3" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-O4" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-Ofast" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-fast" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (STRIP "${cmake_c_flags_minsizerel_edited}" cmake_c_flags_minsizerel_edited) + string (PREPEND cmake_c_flags_minsizerel_edited "-O1 ") + set (CMAKE_C_FLAGS_MINSIZEREL "${cmake_c_flags_minsizerel_edited}") + + set (cmake_c_flags_release_edited "${CMAKE_C_FLAGS_RELEASE}") + string (REPLACE "-O2" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-O3" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-O4" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-Ofast" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-fast" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (STRIP "${cmake_c_flags_release_edited}" cmake_c_flags_release_edited) + string (PREPEND cmake_c_flags_release_edited "-O1 ") + set (CMAKE_C_FLAGS_RELEASE "${cmake_c_flags_release_edited}") + + set (cmake_c_flags_relwithdebinfo_edited "${CMAKE_C_FLAGS_RELWITHDEBINFO}") + string (REPLACE "-O2" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-O3" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-O4" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-Ofast" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-fast" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (STRIP "${cmake_c_flags_relwithdebinfo_edited}" cmake_c_flags_relwithdebinfo_edited) + string (PREPEND cmake_c_flags_relwithdebinfo_edited "-O1 ") + set (CMAKE_C_FLAGS_RELWITHDEBINFO "${cmake_c_flags_relwithdebinfo_edited}") endif () if (CMAKE_COMPILER_IS_GNUCC) diff --git a/config/nvidia-flags b/config/nvidia-flags index 864c6444114..c140edd9830 100644 --- a/config/nvidia-flags +++ b/config/nvidia-flags @@ -76,7 +76,8 @@ if test "X-nvc" = "X-$cc_vendor" -o "X-nvcc" = "X-$cc_vendor"; then ############## # NDEBUG is handled explicitly by the configure script - PROD_CFLAGS="-fast" + #PROD_CFLAGS="-fast" + PROD_CFLAGS="" # -fast implies -O2 and -O2+ currently has test failures. ######### # Debug # @@ -106,7 +107,8 @@ if test "X-nvc" = "X-$cc_vendor" -o "X-nvcc" = "X-$cc_vendor"; then ################ HIGH_OPT_CFLAGS="-O1" # -O2+ currently has test failures. - DEBUG_OPT_CFLAGS="-gopt -O2" + #DEBUG_OPT_CFLAGS="-gopt -O2" + DEBUG_OPT_CFLAGS="-gopt -O1" # -O2+ currently has test failures. 
NO_OPT_CFLAGS="-O0" ################# diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 0f18f6fa276..51a73ce1121 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -1158,6 +1158,28 @@ Platforms Tested Known Problems ============== + When HDF5 is compiled with NVHPC versions 23.5 - 23.9 (additional versions may + also be applicable) and with -O2 (or higher) and -DNDEBUG, test failures occur + in the following tests: + + H5PLUGIN-filter_plugin + H5TEST-flush2 + H5TEST-testhdf5-base + MPI_TEST_t_filters_parallel + + Since these tests pass with an optimization level of -O1 (and -O0) and it is + currently unclear whether the test failures are due to issues in HDF5 or issues + in the 'nvc' compiler, the maximum optimization level for NVHPC has been set + to -O1 until the test failures can be resolved. Note that even at -O1 optimization + level, there still appears to be a sporadic test failure in the Java JUnit tests + that has occasionally been seen in JUnit-TestH5Pfapl and JUnit-TestH5D. It is also + unclear whether this is an issue in HDF5 or with the 'nvc' compiler. Finally, note + that NVHPC 23.9 will fail to compile the test/tselect.c test file with a compiler + error of 'use of undefined value' when the optimization level is -O2 or higher. + Nvidia is aware of this issue and has suggested lowering the optimization level to + -O1 for the time being: + https://forums.developer.nvidia.com/t/hdf5-no-longer-compiles-with-nv-23-9/269045. + IEEE standard arithmetic enables software to raise exceptions such as overflow, division by zero, and other illegal operations without interrupting or halting the program flow. The HDF5 C library intentionally performs these exceptions. From 94a992689e0ff1de71f191f606bdc231211fb2d6 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Thu, 2 Nov 2023 00:11:23 -0500 Subject: [PATCH 083/101] Update NVHPC version from 23.7 to 23.9. 
(#3736) Co-authored-by: Larry Knox --- .github/workflows/nvhpc-auto.yml | 24 ++++++++++++------------ .github/workflows/nvhpc-cmake.yml | 12 ++++++------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/nvhpc-auto.yml b/.github/workflows/nvhpc-auto.yml index 3e3a323fe1e..0b6f64af0e9 100644 --- a/.github/workflows/nvhpc-auto.yml +++ b/.github/workflows/nvhpc-auto.yml @@ -31,21 +31,21 @@ jobs: curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list sudo apt-get update -y - sudo apt-get install -y nvhpc-23-7 - echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV - echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV + sudo apt-get install -y nvhpc-23-9 + echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV + echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV - echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++" >> $GITHUB_ENV - echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc" >> $GITHUB_ENV - echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib" >> $GITHUB_ENV + echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc++" >> $GITHUB_ENV + echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc" >> $GITHUB_ENV + echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvfortran" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/lib" >> $GITHUB_ENV echo "DESTDIR=/tmp" >> $GITHUB_ENV - name: Autotools Configure shell: bash run: | export RUNPARALLEL="mpiexec -np 2" - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" @@ -61,7 +61,7 @@ jobs: - name: Autotools Build shell: bash run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH make -j3 working-directory: ${{ runner.workspace }}/build @@ -69,19 +69,19 @@ jobs: # NORMAL - name: Autotools Run Tests run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH make check -j working-directory: ${{ runner.workspace }}/build # INSTALL (note that this runs even when we don't run the tests) - name: Autotools Install run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + export 
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH make install working-directory: ${{ runner.workspace }}/build # - name: Autotools Verify Install # run: | -# export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH +# export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH # make check-install # working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/nvhpc-cmake.yml b/.github/workflows/nvhpc-cmake.yml index e4a1454f215..1b0dbebc19e 100644 --- a/.github/workflows/nvhpc-cmake.yml +++ b/.github/workflows/nvhpc-cmake.yml @@ -32,20 +32,20 @@ jobs: curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list sudo apt-get update -y - sudo apt-get install -y nvhpc-23-7 + sudo apt-get install -y nvhpc-23-9 echo "CC=nvc" >> $GITHUB_ENV echo "FC=nvfortran" >> $GITHUB_ENV echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV - echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++" >> $GITHUB_ENV - echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc" >> $GITHUB_ENV - echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/cuda/12.2/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib" >> $GITHUB_ENV + echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc++" >> $GITHUB_ENV + echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc" >> $GITHUB_ENV + echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvfortran" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/cuda/12.2/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/lib" >> $GITHUB_ENV echo "DESTDIR=/tmp" >> $GITHUB_ENV - name: CMake Configure with nvc shell: bash run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \ From 61982b60fd5e3a29044a1cdd1931b0ab9f96dec1 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Thu, 2 Nov 2023 07:55:50 -0500 Subject: [PATCH 084/101] Update some doxygen links to local refs (#3814) --- doxygen/dox/LearnBasics2.dox | 6 +++--- doxygen/dox/LearnBasics3.dox | 8 ++++---- doxygen/dox/Overview.dox | 4 ++-- doxygen/dox/ViewTools.dox | 8 ++++---- doxygen/dox/ViewTools2.dox | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/doxygen/dox/LearnBasics2.dox b/doxygen/dox/LearnBasics2.dox index 87bbe87fe47..f436a027d81 100644 --- a/doxygen/dox/LearnBasics2.dox +++ b/doxygen/dox/LearnBasics2.dox @@ -728,12 +728,12 @@ There are numerous datatype functions that allow a user to alter a pre-defined d Refer to the \ref H5T in the \ref RM. Example functions are #H5Tset_size and #H5Tset_precision. 
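As a rough illustration of the datatype-adjustment calls named just above (an editorial sketch, not part of this patch; the function and variable names are arbitrary), a C fragment deriving modified types from the pre-defined ones might look like:

#include "hdf5.h"

/* Editorial sketch -- not code from this patch series */
int
make_custom_types(void)
{
    hid_t str_tid = H5Tcopy(H5T_C_S1);        /* copy the C string base type */
    hid_t int_tid = H5Tcopy(H5T_NATIVE_INT);  /* copy a native integer type  */

    if (str_tid < 0 || int_tid < 0)
        return -1;

    H5Tset_size(str_tid, 16);       /* 16-byte fixed-length strings          */
    H5Tset_precision(int_tid, 16);  /* keep only 16 significant bits         */

    /* ... pass str_tid / int_tid wherever a datatype id is expected,
     *     e.g. H5Dcreate2() or H5Acreate2() ... */

    H5Tclose(int_tid);
    H5Tclose(str_tid);
    return 0;
}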
\section secLBDtypeSpec Specific Datatypes -On the Examples by API -page under Datatypes +On the \ref ExAPI +page under \ref sec_exapi_dtypes you will find many example programs for creating and reading datasets with different datatypes. Below is additional information on some of the datatypes. See -the Examples by API +the \ref ExAPI page for examples of these datatypes. \subsection subsecLBDtypeSpec Array Datatype vs Array Dataspace diff --git a/doxygen/dox/LearnBasics3.dox b/doxygen/dox/LearnBasics3.dox index 748745827f2..a91368b00fc 100644 --- a/doxygen/dox/LearnBasics3.dox +++ b/doxygen/dox/LearnBasics3.dox @@ -152,13 +152,13 @@ To use the compact storage layout, call: #H5Pset_layout \li Create a dataset with the modified property list. (See #H5Dcreate) \li Close the property list. (See #H5Pclose) For example code, see the \ref HDF5Examples page. -Specifically look at the Examples by API. +Specifically look at the \ref ExAPI. There are examples for different languages. The C example to create a chunked dataset is: -h5ex_d_chunk.c +h5ex_d_chunk.c The C example to create a compact dataset is: -h5ex_d_compact.c +h5ex_d_compact.c \section secLBDsetLayoutChange Changing the Layout after Dataset Creation The dataset layout is a Dataset Creation Property List. This means that once the dataset has been @@ -290,7 +290,7 @@ is met, at a certain point in the future.) \subsection subsecLBContentsProgUsing Using #H5Literate, #H5Lvisit and #H5Ovisit For example code, see the \ref HDF5Examples page. -Specifically look at the Examples by API. +Specifically look at the \ref ExAPI. There are examples for different languages, where examples of using #H5Literate and #H5Ovisit/#H5Lvisit are included. The h5ex_g_traverse example traverses a file using H5Literate: diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 54cc638d9d6..438788eb028 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -23,8 +23,8 @@ documents cover a mix of tasks, concepts, and reference, to help a specific \par Versions Version-specific documentation (see the version in the title area) can be found here: - - HDF5 develop branch (this site) - - HDF5 1.14.x + - HDF5 develop branch (this site) + - HDF5 1.14.x - HDF5 1.12.x - HDF5 1.10.x - HDF5 1.8.x diff --git a/doxygen/dox/ViewTools.dox b/doxygen/dox/ViewTools.dox index 66b2def0624..951605674be 100644 --- a/doxygen/dox/ViewTools.dox +++ b/doxygen/dox/ViewTools.dox @@ -53,7 +53,7 @@ packages, which can be obtained from the HDF-EOS and Tools and Information Center. Specifically, the following examples are used in this tutorial topic: \li HDF5 Files created from compiling the \ref LBExamples -\li HDF5 Files on the Examples by API page +\li HDF5 Files on the \ref ExAPI page \li NPP JPSS files, SVM01_npp.. (gzipped) and SVM09_npp.. (gzipped) \li HDF-EOS OMI-Aura file @@ -497,7 +497,7 @@ In the file shown below the dataset / \endcode You can obtain the h5ex_d_gzip.c program that created this file, as well as the file created, -from the Examples by API page. +from the \ref ExAPI page. \subsection subsecViewToolsViewDset_h5ls h5ls Specific datasets can be specified with h5ls by simply adding the dataset path and dataset after the @@ -925,7 +925,7 @@ The output of the above command is shown below: \subsection subsecViewToolsViewDtypes_h5dump h5dump The following datatypes are discussed, using the output of h5dump with HDF5 files from the -Examples by API page: +\ref ExAPI page:
  • @ref subsubsecViewToolsViewDtypes_array
  • @ref subsubsecViewToolsViewDtypes_objref
  • @@ -1036,7 +1036,7 @@ the elements or slab that is selected. A dataset with a Region Reference datatyp one or more Region References. An example of a Region Reference dataset (h5ex_t_regref.h5) can be found on the -Examples by API page, +\ref ExAPI page, under Datatypes. If you examine this dataset with h5dump you will see that /DS1 is a Region Reference dataset as indicated by its datatype, highlighted in bold below: \code diff --git a/doxygen/dox/ViewTools2.dox b/doxygen/dox/ViewTools2.dox index 4d8788a81fc..71d74652d4a 100644 --- a/doxygen/dox/ViewTools2.dox +++ b/doxygen/dox/ViewTools2.dox @@ -727,7 +727,7 @@ GROUP "/" { \endcode The second example imports string data. The example program that creates this file can be downloaded -from the Examples by API page. +from the \ref ExAPI page. Note that string data requires use of the h5dump -y option to exclude indexes and the h5dump --width=1 From 7404b57da68e92bd28c5da2053830e7cbfe032d1 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Thu, 2 Nov 2023 21:42:28 -0500 Subject: [PATCH 085/101] Rework MPI Info FAPL preserve PR to use VFD 'ctl' operations (#3782) --- src/H5FDmpi.c | 48 +++++++++++++++++++++++++++---- src/H5FDmpio.c | 7 +++++ src/H5FDprivate.h | 1 + src/H5FDpublic.h | 1 + src/H5FDsubfiling/H5FDsubfiling.c | 6 ++++ src/H5Fint.c | 25 ++++------------ src/H5Fmpi.c | 34 +++++++++++++++++++--- src/H5Fpkg.h | 1 - src/H5Fprivate.h | 1 + 9 files changed, 95 insertions(+), 29 deletions(-) diff --git a/src/H5FDmpi.c b/src/H5FDmpi.c index 127740efe3b..f247c3478f8 100644 --- a/src/H5FDmpi.c +++ b/src/H5FDmpi.c @@ -104,13 +104,12 @@ H5FD_mpi_get_size(H5FD_t *file) } /* end H5FD_mpi_get_size() */ /*------------------------------------------------------------------------- - * Function: H5FD_mpi_get_comm + * Function: H5FD_mpi_get_comm * - * Purpose: Retrieves the file's communicator + * Purpose: Retrieves the file's MPI_Comm communicator object * - * Return: Success: The communicator (non-negative) - * - * Failure: Negative + * Return: Success: The communicator object + * Failure: MPI_COMM_NULL * *------------------------------------------------------------------------- */ @@ -143,6 +142,45 @@ H5FD_mpi_get_comm(H5FD_t *file) FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_mpi_get_comm() */ +/*------------------------------------------------------------------------- + * Function: H5FD_mpi_get_info + * + * Purpose: Retrieves the file's MPI_Info info object + * + * Return: Success: The info object + * Failure: MPI_INFO_NULL + * + *------------------------------------------------------------------------- + */ +MPI_Info +H5FD_mpi_get_info(H5FD_t *file) +{ + const H5FD_class_t *cls; + uint64_t flags = H5FD_CTL_FAIL_IF_UNKNOWN_FLAG | H5FD_CTL_ROUTE_TO_TERMINAL_VFD_FLAG; + MPI_Info info = MPI_INFO_NULL; + void *info_ptr = (void *)(&info); + MPI_Info ret_value; + + FUNC_ENTER_NOAPI(MPI_INFO_NULL) + + assert(file); + cls = (const H5FD_class_t *)(file->cls); + assert(cls); + assert(cls->ctl); /* All MPI drivers must implement this */ + + /* Dispatch to driver */ + if ((cls->ctl)(file, H5FD_CTL_GET_MPI_INFO_OPCODE, flags, NULL, &info_ptr) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, MPI_INFO_NULL, "driver get_info request failed"); + + if (info == MPI_INFO_NULL) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, MPI_INFO_NULL, "driver get_info request failed -- bad info object"); + + ret_value = info; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_mpi_get_info() */ + /*------------------------------------------------------------------------- * Function: 
H5FD_mpi_MPIOff_to_haddr * diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index 83a5ad45a9a..d5dd1261178 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -3795,6 +3795,7 @@ H5FD__mpio_delete(const char *filename, hid_t fapl_id) * At present, the supported op codes are: * * H5FD_CTL_GET_MPI_COMMUNICATOR_OPCODE + * H5FD_CTL_GET_MPI_INFO_OPCODE * H5FD_CTL_GET_MPI_RANK_OPCODE * H5FD_CTL_GET_MPI_SIZE_OPCODE * H5FD_CTL_GET_MPI_FILE_SYNC_OPCODE @@ -3827,6 +3828,12 @@ H5FD__mpio_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void H5_AT **((MPI_Comm **)output) = file->comm; break; + case H5FD_CTL_GET_MPI_INFO_OPCODE: + assert(output); + assert(*output); + **((MPI_Info **)output) = file->info; + break; + case H5FD_CTL_GET_MPI_RANK_OPCODE: assert(output); assert(*output); diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h index 5330077565b..2fe54a588a9 100644 --- a/src/H5FDprivate.h +++ b/src/H5FDprivate.h @@ -214,6 +214,7 @@ H5_DLL herr_t H5FD_get_mpio_atomicity(H5FD_t *file, bool *flag); H5_DLL int H5FD_mpi_get_rank(H5FD_t *file); H5_DLL int H5FD_mpi_get_size(H5FD_t *file); H5_DLL MPI_Comm H5FD_mpi_get_comm(H5FD_t *file); +H5_DLL MPI_Info H5FD_mpi_get_info(H5FD_t *file); H5_DLL herr_t H5FD_mpi_get_file_sync_required(H5FD_t *file, bool *file_sync_required); #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 5f40bff6845..d8d77d6534b 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -179,6 +179,7 @@ #define H5FD_CTL_INVALID_OPCODE 0 #define H5FD_CTL_TEST_OPCODE 1 #define H5FD_CTL_GET_MPI_COMMUNICATOR_OPCODE 2 +#define H5FD_CTL_GET_MPI_INFO_OPCODE 9 #define H5FD_CTL_GET_MPI_RANK_OPCODE 3 #define H5FD_CTL_GET_MPI_SIZE_OPCODE 4 #define H5FD_CTL_MEM_ALLOC 5 diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index a2daba0d01b..461fa16cfe7 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -2551,6 +2551,12 @@ H5FD__subfiling_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void **((MPI_Comm **)output) = file->ext_comm; break; + case H5FD_CTL_GET_MPI_INFO_OPCODE: + assert(output); + assert(*output); + **((MPI_Info **)output) = file->info; + break; + case H5FD_CTL_GET_MPI_RANK_OPCODE: assert(output); assert(*output); diff --git a/src/H5Fint.c b/src/H5Fint.c index 439fa4f35a6..8738026d7c9 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -402,6 +402,7 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set collective metadata read flag"); if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { MPI_Comm mpi_comm; + MPI_Info mpi_info; /* Retrieve and set MPI communicator */ if (MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(f))) @@ -409,9 +410,11 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_COMM_NAME, &mpi_comm) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI communicator"); - /* Retrieve MPI info object */ - if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &(f->shared->mpi_info)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI info object"); + /* Retrieve and set MPI info */ + if (MPI_INFO_NULL == (mpi_info = H5F_mpi_get_info(f))) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't get MPI info"); + if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &mpi_info) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI info"); } #endif /* H5_HAVE_PARALLEL */ if (H5P_set(new_plist, 
H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < @@ -1130,12 +1133,6 @@ H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5F /* initialize point of no return */ f->shared->point_of_no_return = false; -#ifdef H5_HAVE_PARALLEL - /* Initialize this just in case we fail before setting this field and */ - /* we try to call H5_mpi_info_free() on uninitialized memory in H5F__dest() */ - f->shared->mpi_info = MPI_INFO_NULL; -#endif /* H5_HAVE_PARALLEL */ - /* Copy the file creation and file access property lists into the * new file handle. We do this early because some values might need * to change as the file is being opened. @@ -1212,8 +1209,6 @@ H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5F HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata read flag"); if (H5P_get(plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->shared->coll_md_write)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata write flag"); - if (H5P_get(plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &(f->shared->mpi_info)) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't set MPI info object"); #endif /* H5_HAVE_PARALLEL */ if (H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0) @@ -1419,14 +1414,6 @@ H5F__dest(H5F_t *f, bool flush, bool free_on_failure) f->shared->efc = NULL; } /* end if */ -#ifdef H5_HAVE_PARALLEL - if (f->shared->mpi_info != MPI_INFO_NULL) { - /* Free MPI info saved in the file struct */ - if (H5_mpi_info_free(&f->shared->mpi_info) < 0) - HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't free MPI info"); - } -#endif - /* With the shutdown modifications, the contents of the metadata cache * should be clean at this point, with the possible exception of the * the superblock and superblock extension. 
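From an application's point of view, the effect of routing the MPI info through the driver can be checked with existing public API calls. A minimal sketch (editorial, not part of the patch; assumes an MPI-parallel build with MPI already initialized, and the "cb_nodes" hint is only an example):

#include "hdf5.h"
#include <mpi.h>

/* Editorial sketch -- retrieve the preserved MPI parameters from an open file */
void
check_fapl_mpi_params(const char *filename)
{
    MPI_Comm comm_out = MPI_COMM_NULL;
    MPI_Info info_in  = MPI_INFO_NULL, info_out = MPI_INFO_NULL;
    hid_t    fapl, fapl_out, file;

    MPI_Info_create(&info_in);
    MPI_Info_set(info_in, "cb_nodes", "2");            /* any hint will do */

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, info_in);

    file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* The access plist returned for an open file now carries the MPI info
     * fetched from the driver (via the ctl op) alongside the communicator. */
    fapl_out = H5Fget_access_plist(file);
    H5Pget_fapl_mpio(fapl_out, &comm_out, &info_out);

    /* ... inspect comm_out / info_out ... */

    MPI_Comm_free(&comm_out);
    MPI_Info_free(&info_out);
    MPI_Info_free(&info_in);
    H5Pclose(fapl_out);
    H5Pclose(fapl);
    H5Fclose(file);
}

H5Pget_fapl_mpio() hands back duplicated communicator and info objects, so the caller frees them with MPI_Comm_free()/MPI_Info_free().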
diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c index 8a8fdc135c8..7a535e90d7d 100644 --- a/src/H5Fmpi.c +++ b/src/H5Fmpi.c @@ -97,11 +97,10 @@ H5F_mpi_get_rank(const H5F_t *f) /*------------------------------------------------------------------------- * Function: H5F_mpi_get_comm * - * Purpose: Retrieves the file's communicator + * Purpose: Retrieves the file's MPI_Comm communicator object * - * Return: Success: The communicator (non-negative) - * - * Failure: Negative + * Return: Success: The communicator object + * Failure: MPI_COMM_NULL * *------------------------------------------------------------------------- */ @@ -122,6 +121,33 @@ H5F_mpi_get_comm(const H5F_t *f) FUNC_LEAVE_NOAPI(ret_value) } /* end H5F_mpi_get_comm() */ +/*------------------------------------------------------------------------- + * Function: H5F_mpi_get_info + * + * Purpose: Retrieves the file's MPI_Info info object + * + * Return: Success: The info object + * Failure: MPI_INFO_NULL + * + *------------------------------------------------------------------------- + */ +MPI_Info +H5F_mpi_get_info(const H5F_t *f) +{ + MPI_Info ret_value = MPI_INFO_NULL; + + FUNC_ENTER_NOAPI(MPI_INFO_NULL) + + assert(f && f->shared); + + /* Dispatch to driver */ + if ((ret_value = H5FD_mpi_get_info(f->shared->lf)) == MPI_INFO_NULL) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, MPI_INFO_NULL, "driver get_info request failed"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5F_mpi_get_info() */ + /*------------------------------------------------------------------------- * Function: H5F_shared_mpi_get_size * diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index e81b25072eb..bc5c90bd5da 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -359,7 +359,6 @@ struct H5F_shared_t { #ifdef H5_HAVE_PARALLEL H5P_coll_md_read_flag_t coll_md_read; /* Do all metadata reads collectively */ bool coll_md_write; /* Do all metadata writes collectively */ - MPI_Info mpi_info; /* MPI info */ #endif /* H5_HAVE_PARALLEL */ }; diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 9adbf3a0258..682e938120c 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -640,6 +640,7 @@ H5_DLL herr_t H5F_eoa_dirty(H5F_t *f); #ifdef H5_HAVE_PARALLEL H5_DLL int H5F_mpi_get_rank(const H5F_t *f); H5_DLL MPI_Comm H5F_mpi_get_comm(const H5F_t *f); +H5_DLL MPI_Info H5F_mpi_get_info(const H5F_t *f); H5_DLL int H5F_shared_mpi_get_size(const H5F_shared_t *f_sh); H5_DLL int H5F_mpi_get_size(const H5F_t *f); H5_DLL herr_t H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm); From 5e787741cdc991eb0011c9393152c1678ca737a8 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 3 Nov 2023 08:02:50 -0500 Subject: [PATCH 086/101] Removed the use of C wrappers from H5P APIs. 
(#3824) * fix seg fault on frontier/cray * fix seg fault on frontier/cray * fix seg fault on frontier/cray * removed the use of h5pclose_c * removed the use of h5pclose_c --- fortran/src/H5Off.F90 | 2 +- fortran/src/H5Pf.c | 70 ------------------------------ fortran/src/H5Pff.F90 | 55 ++++++++++++----------- fortran/src/H5f90proto.h | 4 -- fortran/src/hdf5_fortrandll.def.in | 2 - fortran/testpar/Makefile.am | 2 +- 6 files changed, 32 insertions(+), 103 deletions(-) diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index b705ba324d7..9c8b09141b8 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -1263,7 +1263,7 @@ SUBROUTINE h5oget_info_by_idx_f(loc_id, group_name, index_field, order, n, & INTERFACE INTEGER FUNCTION h5oget_info_by_idx_c(loc_id, group_name, namelen, & index_field, order, n, lapl_id_default, object_info, fields) BIND(C, NAME='h5oget_info_by_idx_c') - IMPORT :: c_char, c_ptr, c_funptr + IMPORT :: c_char, c_ptr IMPORT :: HID_T, SIZE_T, HSIZE_T INTEGER(HID_T) , INTENT(IN) :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: group_name diff --git a/fortran/src/H5Pf.c b/fortran/src/H5Pf.c index 3a97d7eab4a..87e6bfbebe6 100644 --- a/fortran/src/H5Pf.c +++ b/fortran/src/H5Pf.c @@ -69,30 +69,6 @@ h5pcreate_c(hid_t_f *cls, hid_t_f *prp_id) return ret_value; } -/****if* H5Pf/h5pclose_c - * NAME - * h5pclose_c - * PURPOSE - * Call H5Pclose to close property lis - * INPUTS - * prp_id - identifier of the property list to be closed - * RETURNS - * 0 on success, -1 on failure - * SOURCE - */ - -int_f -h5pclose_c(hid_t_f *prp_id) -/******/ -{ - int_f ret_value = 0; - - if (H5Pclose((hid_t)*prp_id) < 0) - ret_value = -1; - - return ret_value; -} - /****if* H5Pf/h5pcopy_c * NAME * h5pcopy_c @@ -2252,52 +2228,6 @@ h5pget_hyper_vector_size_c(hid_t_f *prp_id, size_t_f *size) return ret_value; } -/****if* H5Pf/h5pcreate_class_c - * NAME - * h5pcreate_class_c - * PURPOSE - * Call H5Pcreate_class ito create a new property class - * INPUTS - * parent - property list class identifier - * name - name of the new class - * name_len - length of the "name" buffer - * OUTPUTS - * class - new class identifier - * RETURNS - * 0 on success, -1 on failure - * SOURCE - */ -int_f -h5pcreate_class_c(hid_t_f *parent, _fcd name, int_f *name_len, hid_t_f *cls, H5P_cls_create_func_t create, - void *create_data, H5P_cls_copy_func_t copy, void *copy_data, H5P_cls_close_func_t close, - void *close_data) -/******/ -{ - int ret_value = -1; - hid_t c_class; - char *c_name; - - c_name = (char *)HD5f2cstring(name, (size_t)*name_len); - if (c_name == NULL) - goto DONE; - - /* - * Call H5Pcreate_class function. 
- */ - c_class = - H5Pcreate_class((hid_t)*parent, c_name, create, create_data, copy, copy_data, close, close_data); - - if (c_class < 0) - goto DONE; - *cls = (hid_t_f)c_class; - ret_value = 0; - -DONE: - if (c_name != NULL) - free(c_name); - return ret_value; -} - /****if* H5Pf/h5pregister_c * NAME * h5pregister_c diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 5821889c3e9..576509534ae 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -400,15 +400,16 @@ SUBROUTINE h5pclose_f(prp_id, hdferr) INTEGER(HID_T), INTENT(IN) :: prp_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5pclose_c(prp_id) & - BIND(C,NAME='h5pclose_c') + INTEGER(C_INT) FUNCTION H5Pclose(prp_id) & + BIND(C,NAME='H5Pclose') + IMPORT :: C_INT IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: prp_id - END FUNCTION h5pclose_c + INTEGER(HID_T), VALUE :: prp_id + END FUNCTION H5Pclose END INTERFACE - hdferr = h5pclose_c(prp_id) + hdferr = INT(H5Pclose(prp_id)) END SUBROUTINE h5pclose_f !> @@ -5005,31 +5006,32 @@ SUBROUTINE h5pcreate_class_f(parent, name, class, hdferr, create, create_data, & INTEGER , INTENT(OUT) :: hdferr TYPE(C_PTR) , OPTIONAL, INTENT(IN) :: create_data, copy_data, close_data TYPE(C_FUNPTR) , OPTIONAL, INTENT(IN) :: create, copy, close - INTEGER :: name_len - TYPE(C_PTR) :: create_data_default, copy_data_default, close_data_default + TYPE(C_PTR) :: create_data_default, copy_data_default, close_data_default TYPE(C_FUNPTR) :: create_default, copy_default, close_default + + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + INTERFACE - INTEGER FUNCTION h5pcreate_class_c(parent, name, name_len, class, & + INTEGER(HID_T) FUNCTION H5Pcreate_class(parent, name, & create, create_data, copy, copy_data, close, close_data) & - BIND(C, NAME='h5pcreate_class_c') - IMPORT :: c_char, c_ptr, c_funptr + BIND(C, NAME='H5Pcreate_class') + IMPORT :: C_CHAR, C_PTR, C_FUNPTR IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN) :: parent - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER, INTENT(IN) :: name_len - INTEGER(HID_T), INTENT(OUT) :: class - TYPE(C_PTR), VALUE :: create_data, copy_data, close_data - TYPE(C_FUNPTR), VALUE :: create, copy, close - END FUNCTION h5pcreate_class_c + INTEGER(HID_T), VALUE :: parent + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + TYPE(C_PTR), VALUE :: create_data, copy_data, close_data + TYPE(C_FUNPTR), VALUE :: create, copy, close + END FUNCTION H5Pcreate_class END INTERFACE - name_len = LEN(name) - create_default = c_null_funptr !fix:scot - create_data_default = c_null_ptr - copy_default = c_null_funptr !fix:scot - copy_data_default = c_null_ptr - close_default = c_null_funptr !fix:scot - close_data_default = c_null_ptr + c_name = TRIM(name)//C_NULL_CHAR + + create_default = C_NULL_FUNPTR + create_data_default = C_NULL_PTR + copy_default = C_NULL_FUNPTR + copy_data_default = C_NULL_PTR + close_default = C_NULL_FUNPTR + close_data_default = C_NULL_PTR IF(PRESENT(create)) create_default = create IF(PRESENT(create_data)) create_data_default = create_data @@ -5038,11 +5040,14 @@ END FUNCTION h5pcreate_class_c IF(PRESENT(close)) close_default = close IF(PRESENT(close_data)) close_data_default = close_data - hdferr = h5pcreate_class_c(parent, name , name_len, class, & + class = H5Pcreate_class(parent, c_name, & create_default, create_data_default, & copy_default, copy_data_default, & close_default, close_data_default) + hdferr = 0 + IF(class.LT.0) hdferr = -1 + END SUBROUTINE h5pcreate_class_f !> diff --git 
a/fortran/src/H5f90proto.h b/fortran/src/H5f90proto.h index 28a4fa66e7b..0fe1b2017a1 100644 --- a/fortran/src/H5f90proto.h +++ b/fortran/src/H5f90proto.h @@ -368,7 +368,6 @@ H5_FCDLL int_f h5otoken_cmp_c(hid_t_f *loc_id, H5O_token_t *token1, H5O_token_t * Functions from H5Pf.c */ H5_FCDLL int_f h5pcreate_c(hid_t_f *cls, hid_t_f *prp_id); -H5_FCDLL int_f h5pclose_c(hid_t_f *prp_id); H5_FCDLL int_f h5pcopy_c(hid_t_f *prp_id, hid_t_f *new_prp_id); H5_FCDLL int_f h5pequal_c(hid_t_f *plist1_id, hid_t_f *plist2_id, int_f *c_flag); H5_FCDLL int_f h5pget_class_c(hid_t_f *prp_id, hid_t_f *classtype); @@ -451,9 +450,6 @@ H5_FCDLL int_f h5pset_small_data_block_size_c(hid_t_f *plist, hsize_t_f *size); H5_FCDLL int_f h5pget_small_data_block_size_c(hid_t_f *plist, hsize_t_f *size); H5_FCDLL int_f h5pset_hyper_vector_size_c(hid_t_f *plist, size_t_f *size); H5_FCDLL int_f h5pget_hyper_vector_size_c(hid_t_f *plist, size_t_f *size); -H5_FCDLL int_f h5pcreate_class_c(hid_t_f *parent, _fcd name, int_f *name_len, hid_t_f *cls, - H5P_cls_create_func_t create, void *create_data, H5P_cls_copy_func_t copy, - void *copy_data, H5P_cls_close_func_t close, void *close_data); H5_FCDLL int_f h5pregister_c(hid_t_f *cls, _fcd name, int_f *name_len, size_t_f *size, void *value); H5_FCDLL int_f h5pinsert_c(hid_t_f *plist, _fcd name, int_f *name_len, size_t_f *size, void *value); H5_FCDLL int_f h5pset_c(hid_t_f *prp_id, _fcd name, int_f *name_len, void *value); diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index 2ded00222ba..00801776d4d 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -258,7 +258,6 @@ H5P_mp_H5PSET_PRESERVE_F H5P_mp_H5PGET_PRESERVE_F H5P_mp_H5PGET_CLASS_F H5P_mp_H5PCOPY_F -H5P_mp_H5PCLOSE_F H5P_mp_H5PSET_CHUNK_F H5P_mp_H5PGET_CHUNK_F H5P_mp_H5PSET_DEFLATE_F @@ -331,7 +330,6 @@ H5P_mp_H5PCOPY_PROP_F H5P_mp_H5PREMOVE_F H5P_mp_H5PUNREGISTER_F H5P_mp_H5PCLOSE_CLASS_F -H5P_mp_H5PCREATE_CLASS_F H5P_mp_H5PREGISTER_INTEGER H5P_mp_H5PREGISTER_CHAR H5P_mp_H5PINSERT_CHAR diff --git a/fortran/testpar/Makefile.am b/fortran/testpar/Makefile.am index afdda980c5c..1c374090601 100644 --- a/fortran/testpar/Makefile.am +++ b/fortran/testpar/Makefile.am @@ -36,7 +36,7 @@ TEST_PROG_PARA=parallel_test subfiling_test async_test check_PROGRAMS=$(TEST_PROG_PARA) # Temporary files -CHECK_CLEANFILES+=parf[12].h5 h5*_tests.h5 subf.h5* test_async_apis.mod +CHECK_CLEANFILES+=parf[12].h5 h5*_tests.h5 subf.h5* *.mod # Test source files parallel_test_SOURCES=ptest.F90 hyper.F90 mdset.F90 multidsetrw.F90 From 6a3c859e5857860f66b4bd20c0c7d57f3fb50724 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Fri, 3 Nov 2023 11:22:00 -0500 Subject: [PATCH 087/101] Fortran Wrappers H5VLnative_addr_to_token_f and H5VLnative_token_to_address_f (#3801) * Added H5VLnative_addr_to_token_f and H5VLnative_token_to_address_f * Added H5VLnative_addr_to_token_f and H5VLnative_token_to_address_f tests * Added H5VLnative_addr_to_token_f and H5VLnative_token_to_address_f tests --- fortran/src/H5VLff.F90 | 64 ++++++++++++++++++++++++++++++ fortran/src/hdf5_fortrandll.def.in | 2 + fortran/test/tH5O_F03.F90 | 34 +++++++++++----- release_docs/RELEASE.txt | 4 ++ 4 files changed, 95 insertions(+), 9 deletions(-) diff --git a/fortran/src/H5VLff.F90 b/fortran/src/H5VLff.F90 index 5a1fa9fd3b5..4467a59b171 100644 --- a/fortran/src/H5VLff.F90 +++ b/fortran/src/H5VLff.F90 @@ -401,4 +401,68 @@ END FUNCTION H5VLunregister_connector END SUBROUTINE H5VLunregister_connector_f +!> +!! 
\ingroup FH5VL +!! +!! \brief Retrieves the token representation from an address for a location identifier. +!! +!! \param loc_id Specifies a location identifier +!! \param addr Address for object in the file +!! \param token Token representing the object in the file +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5VLnative_addr_to_token() +!! + SUBROUTINE h5vlnative_addr_to_token_f(loc_id, addr, token, hdferr) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + INTEGER(HADDR_T) , INTENT(IN) :: addr + TYPE(H5O_TOKEN_T_F), INTENT(OUT) :: token + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5VLnative_addr_to_token(loc_id, addr, token) BIND(C, NAME='H5VLnative_addr_to_token') + IMPORT :: HID_T, C_INT, HADDR_T, H5O_TOKEN_T_F + INTEGER(HID_T) , VALUE :: loc_id + INTEGER(HADDR_T), VALUE :: addr + TYPE(H5O_TOKEN_T_F) :: token + END FUNCTION H5VLnative_addr_to_token + END INTERFACE + + hdferr = INT(H5VLnative_addr_to_token(loc_id, addr, token)) + + END SUBROUTINE h5vlnative_addr_to_token_f + +!> +!! \ingroup FH5VL +!! +!! \brief Retrieves the object address from a token representation for a location identifier. +!! +!! \param loc_id Specifies a location identifier +!! \param token Token representing the object in the file +!! \param addr Address for object in the file +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5VLnative_token_to_addr() +!! + SUBROUTINE h5vlnative_token_to_addr_f(loc_id, token, addr, hdferr) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + TYPE(H5O_TOKEN_T_F), INTENT(IN) :: token + INTEGER(HADDR_T) , INTENT(OUT) :: addr + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5VLnative_token_to_addr(loc_id, token, addr) BIND(C, NAME='H5VLnative_token_to_addr') + IMPORT :: HID_T, C_INT, HADDR_T, H5O_TOKEN_T_F + INTEGER(HID_T) , VALUE :: loc_id + TYPE(H5O_TOKEN_T_F), VALUE :: token + INTEGER(HADDR_T) :: addr + END FUNCTION H5VLnative_token_to_addr + END INTERFACE + + hdferr = INT(H5VLnative_token_to_addr(loc_id, token, addr)) + + END SUBROUTINE h5vlnative_token_to_addr_f + END MODULE H5VL diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index 00801776d4d..a8399a38ca7 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -553,6 +553,8 @@ H5VL_mp_H5VLGET_CONNECTOR_ID_BY_VALUE_F H5VL_mp_H5VLGET_CONNECTOR_NAME_F H5VL_mp_H5VLCLOSE_F H5VL_mp_H5VLUNREGISTER_CONNECTOR_F +H5VL_mp_H5VLNATIVE_ADDR_TO_TOKEN_F +H5VL_mp_H5VLNATIVE_TOKEN_TO_ADDR_F ; H5Z H5Z_mp_H5ZUNREGISTER_F H5Z_mp_H5ZFILTER_AVAIL_F diff --git a/fortran/test/tH5O_F03.F90 b/fortran/test/tH5O_F03.F90 index b27b0678644..a4cf282ecca 100644 --- a/fortran/test/tH5O_F03.F90 +++ b/fortran/test/tH5O_F03.F90 @@ -73,6 +73,8 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie INTEGER :: cmp_value INTEGER :: i INTEGER :: ierr + INTEGER(HADDR_T) :: addr + TYPE(H5O_TOKEN_T_F) :: token status = 0 @@ -82,7 +84,7 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie RETURN ENDIF token_c%token = oinfo_c%token%token - CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr); + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr) IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN status = -1 RETURN @@ -96,6 +98,22 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie RETURN ENDIF + CALL h5vlnative_token_to_addr_f(loc_id, oinfo_f%token, addr, ierr) + IF( ierr .EQ. 
-1) THEN + status = -1 + RETURN + ENDIF + CALL h5vlnative_addr_to_token_f(loc_id, addr, token, ierr) + IF( ierr .EQ. -1) THEN + status = -1 + RETURN + ENDIF + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token, cmp_value, ierr) + IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN + status = -1 + RETURN + ENDIF + ENDIF IF((field .EQ. H5O_INFO_TIME_F).OR.(field .EQ. H5O_INFO_ALL_F))THEN @@ -132,7 +150,7 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie status = 0 IF( oinfo_c%fileno .NE. oinfo_f%fileno) status = status + 1 token_c%token = oinfo_c%token%token - CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr); + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr) IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN status = -1 RETURN @@ -156,7 +174,7 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie status = 0 IF( oinfo_c%fileno .NE. oinfo_f%fileno) status = status + 1 token_c%token = oinfo_c%token%token - CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr); + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr) IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN status = -1 RETURN @@ -234,25 +252,24 @@ INTEGER FUNCTION visit_obj_cb( group_id, name, oinfo_c, op_data) bind(C) ENDIF ! Check H5Oget_info_by_name_f; if partial field values were filled correctly - CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr); + CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr) visit_obj_cb = compare_h5o_info_t( group_id, oinfo_f, oinfo_c, op_data%field, .TRUE. ) IF(visit_obj_cb.EQ.-1) RETURN ! Check H5Oget_info_by_name_f, only check field values - CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr, fields = op_data%field); + CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr, fields = op_data%field) visit_obj_cb = compare_h5o_info_t(group_id, oinfo_f, oinfo_c, op_data%field, .FALSE. ) IF(visit_obj_cb.EQ.-1) RETURN - IF(op_data%idx.EQ.1)THEN ! Check H5Oget_info_f, only check field values - CALL H5Oget_info_f(group_id, oinfo_f, ierr, fields = op_data%field); + CALL H5Oget_info_f(group_id, oinfo_f, ierr, fields = op_data%field) visit_obj_cb = compare_h5o_info_t(group_id, oinfo_f, oinfo_c, op_data%field, .FALSE. ) IF(visit_obj_cb.EQ.-1) RETURN ! Check H5Oget_info_f; if partial field values where filled correctly - CALL H5Oget_info_f(group_id, oinfo_f, ierr); + CALL H5Oget_info_f(group_id, oinfo_f, ierr) visit_obj_cb = compare_h5o_info_t(group_id, oinfo_f, oinfo_c, op_data%field, .TRUE. ) IF(visit_obj_cb.EQ.-1) RETURN @@ -677,7 +694,6 @@ SUBROUTINE test_obj_info(total_error) CALL check("h5oget_info_by_idx_f", -1, total_error) ENDIF - ! Close objects CALL h5dclose_f(did, error) CALL check("h5dclose_f", error, total_error) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 51a73ce1121..5dd36ea101c 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -335,6 +335,10 @@ New Features Fortran Library: ---------------- + + - Added Fortran APIs: + h5vlnative_addr_to_token_f and h5vlnative_token_to_address_f + - Fixed an uninitialized error return value for hdferr to return the error state of the h5aopen_by_idx_f API. 
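The C routines underneath h5vlnative_addr_to_token_f/h5vlnative_token_to_addr_f can also be exercised directly. A minimal sketch of the same round trip the Fortran test performs (editorial, not part of the patch; assumes the file uses the native VOL connector, and the function name is illustrative):

#include "hdf5.h"
#include "H5VLnative.h"

/* Editorial sketch -- token -> address -> token should compare equal */
int
token_addr_round_trip(hid_t file_id)
{
    H5O_info2_t oinfo;
    H5O_token_t token2;
    haddr_t     addr = HADDR_UNDEF;
    int         cmp  = -1;

    /* Token of the object at this location (the root group) */
    if (H5Oget_info3(file_id, &oinfo, H5O_INFO_BASIC) < 0)
        return -1;

    if (H5VLnative_token_to_addr(file_id, oinfo.token, &addr) < 0)
        return -1;
    if (H5VLnative_addr_to_token(file_id, addr, &token2) < 0)
        return -1;
    if (H5Otoken_cmp(file_id, &oinfo.token, &token2, &cmp) < 0 || cmp != 0)
        return -1;

    return 0;
}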
From 3c0714797196fe428e1b81f3c7b02149676c54f7 Mon Sep 17 00:00:00 2001 From: Glenn Song <43005495+glennsong09@users.noreply.github.com> Date: Mon, 6 Nov 2023 09:27:51 -0600 Subject: [PATCH 088/101] Create test for H5Pget_dxpl_mpio (#3825) * Create test and add to testphdf5 --- testpar/t_ph5basic.c | 136 +++++++++++++++++++++++++++++++++++++++++++ testpar/testphdf5.c | 1 + testpar/testphdf5.h | 1 + 3 files changed, 138 insertions(+) diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c index b627b7c40ef..7fdefeb3ee9 100644 --- a/testpar/t_ph5basic.c +++ b/testpar/t_ph5basic.c @@ -177,3 +177,139 @@ test_fapl_mpio_dup(void) VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); } } /* end test_fapl_mpio_dup() */ + +/*------------------------------------------------------------------------- + * Function: test_get_dxpl_mpio + * + * Purpose: Test that H5Pget_dxpl_mpio will properly return the data + * transfer mode of collective and independent I/O access + * after setting it and writing some data. + * + * Return: Success: None + * Failure: Abort + *------------------------------------------------------------------------- + */ +void +test_get_dxpl_mpio(void) +{ + hid_t fid = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t fapl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + H5FD_mpio_xfer_t xfer_mode; + hsize_t dims[2] = {100, 100}; + hsize_t i, j; + int *data = NULL; + int mpi_rank, mpi_size; + const char *filename; + herr_t ret; + + if (VERBOSE_MED) + printf("Verify get_dxpl_mpio correctly gets the data transfer mode" + "set in the data transfer property list after a write\n"); + + /* Set up MPI for VRFY macro */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Initialize data array */ + data = malloc(100 * 100 * sizeof(*data)); + VRFY((data != NULL), "Data buffer initialized properly"); + + /* Create parallel fapl */ + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO); + VRFY((fapl >= 0), "Fapl creation succeeded"); + + /* Create a file */ + filename = (const char *)GetTestParameters(); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Create a dataset */ + sid = H5Screate_simple(2, dims, NULL); + VRFY((sid >= 0), "H5Screate succeeded"); + did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((did >= 0), "H5Dcreate2 succeeded"); + + /* Use collective I/O access */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio set to collective succeeded"); + + /* Write some data */ + for (i = 0; i < dims[0]; i++) + for (j = 0; j < dims[1]; j++) + data[(i * 100) + j] = (int)(i + (i * j) + j); + + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_COLLECTIVE), "Xfer_mode retrieved" + " successfully"); + + /* Read the data */ + ret = H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_COLLECTIVE), "Xfer_mode retrieved" + " 
successfully"); + + /* Check it does nothing on receiving NULL */ + ret = H5Pget_dxpl_mpio(dxpl, NULL); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded on NULL input"); + + /* Use independent I/O access */ + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio set to independent succeeded"); + + /* Write some data */ + for (i = 0; i < dims[0]; i++) + for (j = 0; j < dims[1]; j++) + data[(i * 100) + j] = (int)(i + (j * j) + i); + + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_INDEPENDENT), "Xfer_mode retrieved" + " successfully"); + + /* Read the data */ + ret = H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_INDEPENDENT), "Xfer_mode retrieved" + " successfully"); + + /* Close everything */ + free(data); + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Pclose(dxpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Dclose(did); + VRFY((ret >= 0), "H5Dclose succeeded"); + + ret = H5Sclose(sid); + VRFY((ret >= 0), "H5Sclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + +} /* end test_get_dxpl_mpio() */ diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 985c3de734c..e094ad6dcd3 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -351,6 +351,7 @@ main(int argc, char **argv) /* Tests are generally arranged from least to most complexity... 
*/ AddTest("mpiodup", test_fapl_mpio_dup, NULL, "fapl_mpio duplicate", NULL); + AddTest("getdxplmpio", test_get_dxpl_mpio, NULL, "dxpl_mpio get", PARATESTFILE); AddTest("split", test_split_comm_access, NULL, "dataset using split communicators", PARATESTFILE); AddTest("h5oflusherror", test_oflush, NULL, "H5Oflush failure", PARATESTFILE); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 45f1a945d07..31b7c6963d5 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -242,6 +242,7 @@ void collective_group_write_independent_group_read(void); void collective_group_write(void); void independent_group_read(void); void test_fapl_mpio_dup(void); +void test_get_dxpl_mpio(void); void test_split_comm_access(void); void test_page_buffer_access(void); void dataset_atomicity(void); From ec695cf34f7256d7af9bbc2fcf5366da0119fcea Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Tue, 7 Nov 2023 09:50:03 -0600 Subject: [PATCH 089/101] Renamed h5fuse.sh to h5fuse (#3834) * provide an alternative to mapfile for older bash --- configure.ac | 4 ++-- release_docs/RELEASE.txt | 6 ++++++ testpar/t_subfiling_vfd.c | 4 ++-- utils/Makefile.am | 2 +- utils/subfiling_vfd/CMakeLists.txt | 8 ++++---- utils/subfiling_vfd/{h5fuse.sh.in => h5fuse.in} | 4 ++-- 6 files changed, 17 insertions(+), 11 deletions(-) rename utils/subfiling_vfd/{h5fuse.sh.in => h5fuse.in} (98%) diff --git a/configure.ac b/configure.ac index 44ab43a4664..30f5d2caeaa 100644 --- a/configure.ac +++ b/configure.ac @@ -4236,10 +4236,10 @@ AC_CONFIG_FILES([Makefile hl/fortran/examples/Makefile hl/fortran/examples/run-hlfortran-ex.sh]) -AC_CONFIG_FILES([utils/subfiling_vfd/h5fuse.sh], [chmod +x utils/subfiling_vfd/h5fuse.sh]) +AC_CONFIG_FILES([utils/subfiling_vfd/h5fuse], [chmod +x utils/subfiling_vfd/h5fuse]) if test -n "$TESTPARALLEL"; then if test "X$SUBFILING_VFD" = "Xyes"; then - AC_CONFIG_LINKS([testpar/h5fuse.sh:utils/subfiling_vfd/h5fuse.sh]) + AC_CONFIG_LINKS([testpar/h5fuse:utils/subfiling_vfd/h5fuse]) fi fi diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 5dd36ea101c..5e853783f8a 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -920,6 +920,12 @@ Bug Fixes since HDF5-1.14.0 release Tools ----- + + - Renamed h5fuse.sh to h5fuse + + Addresses Discussion #3791 + + - Fixed an issue with unmatched MPI messages in ph5diff The "manager" MPI rank in ph5diff was unintentionally sending "program end" diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index 72613a3bef1..45cb363fd6a 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -1898,7 +1898,7 @@ test_subfiling_h5fuse(void) if (MAINPROCESS) { FILE *h5fuse_script; - h5fuse_script = fopen("h5fuse.sh", "r"); + h5fuse_script = fopen("h5fuse", "r"); if (h5fuse_script) fclose(h5fuse_script); else @@ -2014,7 +2014,7 @@ test_subfiling_h5fuse(void) SUBF_FILENAME, file_inode); args[0] = strdup("env"); - args[1] = strdup("./h5fuse.sh"); + args[1] = strdup("./h5fuse"); args[2] = strdup("-q"); args[3] = strdup("-f"); args[4] = tmp_filename; diff --git a/utils/Makefile.am b/utils/Makefile.am index 229712e6aaf..740f5c36fbf 100644 --- a/utils/Makefile.am +++ b/utils/Makefile.am @@ -41,6 +41,6 @@ endif # All subdirectories SUBDIRS=$(MIRROR_VFD_DIR) $(TESTUTIL_DIR) $(TOOLS_DIR) -bin_SCRIPTS = subfiling_vfd/h5fuse.sh +bin_SCRIPTS = subfiling_vfd/h5fuse include $(top_srcdir)/config/conclude.am diff --git a/utils/subfiling_vfd/CMakeLists.txt b/utils/subfiling_vfd/CMakeLists.txt index 3acdc6bc24f..62b62557a27 100644 --- 
a/utils/subfiling_vfd/CMakeLists.txt +++ b/utils/subfiling_vfd/CMakeLists.txt @@ -1,20 +1,20 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_UTILS_SUBFILINGVFD C) -configure_file (${HDF5_UTILS_SUBFILINGVFD_SOURCE_DIR}/h5fuse.sh.in ${HDF5_UTILS_SUBFILINGVFD_BINARY_DIR}/h5fuse.sh @ONLY) +configure_file (${HDF5_UTILS_SUBFILINGVFD_SOURCE_DIR}/h5fuse.in ${HDF5_UTILS_SUBFILINGVFD_BINARY_DIR}/h5fuse @ONLY) -# Copy h5fuse.sh to testpar directory for subfiling tests +# Copy h5fuse to testpar directory for subfiling tests if (HDF5_ENABLE_PARALLEL AND HDF5_TEST_PARALLEL) file ( COPY - ${HDF5_UTILS_SUBFILINGVFD_BINARY_DIR}/h5fuse.sh + ${HDF5_UTILS_SUBFILINGVFD_BINARY_DIR}/h5fuse DESTINATION ${HDF5_TEST_PAR_BINARY_DIR} ) endif () install ( - FILES ${HDF5_UTILS_SUBFILINGVFD_BINARY_DIR}/h5fuse.sh + FILES ${HDF5_UTILS_SUBFILINGVFD_BINARY_DIR}/h5fuse DESTINATION ${HDF5_INSTALL_BIN_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE COMPONENT utilsapplications diff --git a/utils/subfiling_vfd/h5fuse.sh.in b/utils/subfiling_vfd/h5fuse.in similarity index 98% rename from utils/subfiling_vfd/h5fuse.sh.in rename to utils/subfiling_vfd/h5fuse.in index 6f4bf619bbe..a4c6a053cc0 100755 --- a/utils/subfiling_vfd/h5fuse.sh.in +++ b/utils/subfiling_vfd/h5fuse.in @@ -26,11 +26,11 @@ function usage { configuration file either as a command-line argument or the script will search for the *.config file in the current directory." echo "" - echo "usage: h5fuse.sh [-f filename] [-h] [-p] [-q] [-r] [-v] " + echo "usage: h5fuse [-f filename] [-h] [-p] [-q] [-r] [-v] " echo "-f filename Subfile configuration file." echo "-h Print this help." echo "-q Quiet all output. [no]" - echo "-p h5fuse.sh is being run in parallel, with more than one rank. [no]" + echo "-p h5fuse is being run in parallel, with more than one rank. [no]" echo "-r Remove subfiles after being processed. [no]" echo "-v Verbose output. [no]" echo "" From e0d095ebf020706ec7d7c82e6674b18f1a0a2d5b Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Tue, 7 Nov 2023 08:13:30 -0800 Subject: [PATCH 090/101] Disable FP exceptions in H5T init code (#3837) The H5T floating-point datatype initialization code can raise exceptions when handling signaling NaNs. This change disables FE_INVALID exceptions during initialization. Also removes the -ieee=full change for NAG Fortran as that shouldn't be necessary anymore. 
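In outline, the guard this change wraps around the type-detection code is the standard C <fenv.h> hold/clear/restore pattern. A small editorial sketch (the helper and callback names are illustrative, not taken from the patch):

#include <fenv.h>

/* Editorial sketch -- run FP-introspection code with exceptions suppressed */
static int
run_with_fp_exceptions_suppressed(void (*work)(void))
{
    fenv_t saved;

    if (feholdexcept(&saved) != 0)  /* save env, clear flags, go non-stop   */
        return -1;

    work();                         /* code that may probe signaling NaNs   */

    feclearexcept(FE_INVALID);      /* drop exceptions raised while probing */
    return feupdateenv(&saved);     /* restore the caller's FP environment  */
}

feholdexcept() saves the environment, clears the status flags, and installs non-stop mode, so any FE_INVALID raised while probing NaN bit patterns can be discarded before feupdateenv() re-installs the caller's environment.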
Fixes #3831 --- config/linux-gnulibc1 | 5 +--- release_docs/RELEASE.txt | 22 +++++++++++----- src/H5Tinit_float.c | 54 ++++++++++++++++++++++++++-------------- src/H5private.h | 1 + 4 files changed, 54 insertions(+), 28 deletions(-) diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 index 92f2be63df5..b4139ee1a58 100644 --- a/config/linux-gnulibc1 +++ b/config/linux-gnulibc1 @@ -183,10 +183,7 @@ case $FC_BASENAME in nagfor) F9XSUFFIXFLAG="" - # NOTE: The default is -ieee=stop, which will cause problems - # when the H5T module performs floating-point type - # introspection - AM_FCFLAGS="$AM_FCFLAGS -ieee=full" + AM_FCFLAGS="$AM_FCFLAGS" FSEARCH_DIRS="" # Production diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 5e853783f8a..145457400bc 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -408,6 +408,22 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Suppressed floating-point exceptions in H5T init code + + The floating-point datatype initialization code in H5Tinit_float.c + could raise FE_INVALID exceptions while munging bits and performing + comparisons that might involve NaN. This was not a problem when the + initialization code was executed in H5detect at compile time (prior + to 1.14.3), but now that the code is executed at library startup + (1.14.3+), these exceptions can be caught by user code, as is the + default in the NAG Fortran compiler. + + Starting in 1.14.4, we now suppress floating-point exceptions while + initializing the floating-point types and clear FE_INVALID before + restoring the original environment. + + Fixes GitHub #3831 + - Fixed a file handle leak in the core VFD When opening a file with the core VFD and a file image, if the file @@ -1190,12 +1206,6 @@ Known Problems -O1 for the time being: https://forums.developer.nvidia.com/t/hdf5-no-longer-compiles-with-nv-23-9/269045. - IEEE standard arithmetic enables software to raise exceptions such as overflow, - division by zero, and other illegal operations without interrupting or halting - the program flow. The HDF5 C library intentionally performs these exceptions. - Therefore, the "-ieee=full" nagfor switch is necessary when compiling a program - to avoid stopping on an exception. - CMake files do not behave correctly with paths containing spaces. Do not use spaces in paths because the required escaping for handling spaces results in very complex and fragile build files. diff --git a/src/H5Tinit_float.c b/src/H5Tinit_float.c index 3b9e127fe4e..3213f00fece 100644 --- a/src/H5Tinit_float.c +++ b/src/H5Tinit_float.c @@ -51,19 +51,23 @@ * Function: DETECT_F * * Purpose: This macro takes a floating point type like `double' and - * a base name like `natd' and detects byte order, mantissa - * location, exponent location, sign bit location, presence or - * absence of implicit mantissa bit, and exponent bias and - * initializes a detected_t structure with those properties. + * and detects byte order, mantissa location, exponent location, + * sign bit location, presence or absence of implicit mantissa + * bit, and exponent bias and initializes a detected_t structure + * with those properties. + * + * Note that these operations can raise floating-point + * exceptions and building with some compiler options + * (especially Fortran) can cause problems. 
*------------------------------------------------------------------------- */ -#define DETECT_F(TYPE, VAR, INFO) \ +#define DETECT_F(TYPE, INFO) \ do { \ - TYPE _v1, _v2, _v3; \ - unsigned char _buf1[sizeof(TYPE)], _buf3[sizeof(TYPE)]; \ - unsigned char _pad_mask[sizeof(TYPE)]; \ - unsigned char _byte_mask; \ - int _i, _j, _last = (-1); \ + TYPE _v1, _v2, _v3; \ + uint8_t _buf1[sizeof(TYPE)], _buf3[sizeof(TYPE)]; \ + uint8_t _pad_mask[sizeof(TYPE)]; \ + uint8_t _byte_mask; \ + int _i, _j, _last = -1; \ \ memset(&INFO, 0, sizeof(INFO)); \ INFO.size = sizeof(TYPE); \ @@ -81,7 +85,7 @@ _v1 = (TYPE)4.0L; \ H5MM_memcpy(_buf1, (const void *)&_v1, sizeof(TYPE)); \ for (_i = 0; _i < (int)sizeof(TYPE); _i++) \ - for (_byte_mask = (unsigned char)1; _byte_mask; _byte_mask = (unsigned char)(_byte_mask << 1)) { \ + for (_byte_mask = (uint8_t)1; _byte_mask; _byte_mask = (uint8_t)(_byte_mask << 1)) { \ _buf1[_i] ^= _byte_mask; \ H5MM_memcpy((void *)&_v2, (const void *)_buf1, sizeof(TYPE)); \ H5_GCC_CLANG_DIAG_OFF("float-equal") \ @@ -118,7 +122,7 @@ _v1 = (TYPE)1.0L; \ _v2 = (TYPE)-1.0L; \ if (H5T__bit_cmp(sizeof(TYPE), INFO.perm, &_v1, &_v2, _pad_mask, &(INFO.sign)) < 0) \ - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to detect byte order"); \ + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to determine sign bit"); \ \ /* Mantissa */ \ INFO.mpos = 0; \ @@ -126,12 +130,11 @@ _v1 = (TYPE)1.0L; \ _v2 = (TYPE)1.5L; \ if (H5T__bit_cmp(sizeof(TYPE), INFO.perm, &_v1, &_v2, _pad_mask, &(INFO.msize)) < 0) \ - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to detect byte order"); \ + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "failed to determine mantissa"); \ INFO.msize += 1 + (unsigned)(INFO.imp ? 0 : 1) - INFO.mpos; \ \ /* Exponent */ \ - INFO.epos = INFO.mpos + INFO.msize; \ - \ + INFO.epos = INFO.mpos + INFO.msize; \ INFO.esize = INFO.sign - INFO.epos; \ \ _v1 = (TYPE)1.0L; \ @@ -456,17 +459,24 @@ H5T__set_precision(H5T_fpoint_det_t *d) herr_t H5_NO_UBSAN H5T__init_native_float_types(void) { + fenv_t saved_fenv; H5T_fpoint_det_t det; H5T_t *dt = NULL; herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE + /* Turn off floating-point exceptions while initializing to avoid + * tripping over signaling NaNs while looking at "don't care" bits. 
+ */ + if (feholdexcept(&saved_fenv) != 0) + HSYS_GOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't save floating-point environment"); + /* H5T_NATIVE_FLOAT */ /* Get the type's characteristics */ memset(&det, 0, sizeof(H5T_fpoint_det_t)); - DETECT_F(float, FLOAT, det); + DETECT_F(float, det); /* Allocate and fill type structure */ if (NULL == (dt = H5T__alloc())) @@ -497,7 +507,7 @@ H5T__init_native_float_types(void) /* Get the type's characteristics */ memset(&det, 0, sizeof(H5T_fpoint_det_t)); - DETECT_F(double, DOUBLE, det); + DETECT_F(double, det); /* Allocate and fill type structure */ if (NULL == (dt = H5T__alloc())) @@ -528,7 +538,7 @@ H5T__init_native_float_types(void) /* Get the type's characteristics */ memset(&det, 0, sizeof(H5T_fpoint_det_t)); - DETECT_F(long double, LDOUBLE, det); + DETECT_F(long double, det); /* Allocate and fill type structure */ if (NULL == (dt = H5T__alloc())) @@ -561,6 +571,14 @@ H5T__init_native_float_types(void) H5T_native_order_g = det.order; done: + /* Clear any FE_INVALID exceptions from NaN handling */ + if (feclearexcept(FE_INVALID) != 0) + HSYS_GOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't clear floating-point exceptions"); + + /* Restore the original environment */ + if (feupdateenv(&saved_fenv) != 0) + HSYS_GOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "can't restore floating-point environment"); + if (ret_value < 0) { if (dt != NULL) { dt->shared = H5FL_FREE(H5T_shared_t, dt->shared); diff --git a/src/H5private.h b/src/H5private.h index 14a0ac3225f..3aaa0d52453 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include From 66786fa036fa6da86c56725808c3d82d48a89b0e Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 10 Nov 2023 07:36:46 -0600 Subject: [PATCH 091/101] Add intel oneapi windows build to CI CMake (#3836) --- .github/workflows/intel-cmake.yml | 114 ++++++++++++++++------- .github/workflows/main-cmake.yml | 60 ++++++++++++ config/cmake-presets/hidden-presets.json | 80 +++++++++++++++- fortran/examples/ph5example.f90 | 6 +- fortran/src/CMakeLists.txt | 24 +++-- fortran/src/H5Pff.F90 | 2 +- fortran/src/H5config_f.inc.cmake | 34 ++++--- fortran/src/H5config_f.inc.in | 2 +- fortran/src/hdf5_fortrandll.def.in | 12 ++- fortran/test/tH5T_F03.F90 | 14 +-- hl/fortran/src/CMakeLists.txt | 18 ++-- hl/fortran/src/H5LTff.F90 | 14 +-- 12 files changed, 282 insertions(+), 98 deletions(-) diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index 9972376332e..94dc6cf41e6 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -13,69 +13,111 @@ permissions: contents: read jobs: - Intel_build_and_test: - name: "Intel ${{ inputs.build_mode }}" + # Linux (Ubuntu) w/ gcc + CMake + # + Intel_oneapi_linux: + name: "ubuntu-oneapi ${{ inputs.build_mode }}" runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: add oneAPI to apt - shell: bash - run: | - cd /tmp - wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - sudo add-apt-repository -y "deb https://apt.repos.intel.com/oneapi all main" - + # Only CMake need ninja-build, but we just install it unilaterally + # libssl, etc. 
are needed for the ros3 VFD - name: Install Linux Dependencies run: | - sudo apt update - sudo apt-get install ninja-build doxygen graphviz - sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev - - name: install oneAPI dpcpp and fortran compiler - shell: bash - run: | - sudo apt update - sudo apt install -y intel-oneapi-compiler-dpcpp-cpp - sudo apt install -y intel-oneapi-compiler-fortran - - - name: install oneAPI MKL library - shell: bash - run: | - sudo apt install -y intel-oneapi-mkl-devel + - name: add oneAPI to env + uses: fortran-lang/setup-fortran@v1 + id: setup-fortran + with: + compiler: intel + version: '2023.2' - - name: CMake Configure with icx + - name: CMake Configure with oneapi shell: bash + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} run: | - source /opt/intel/oneapi/setvars.sh - export PATH=$PATH:/opt/intel/oneapi/compiler/2023.2.1/linux/bin mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \ + cmake -C ${{ github.workspace }}/config/cmake/cacheinit.cmake -G Ninja \ -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ - -DCMAKE_TOOLCHAIN_FILE=config/toolchain/intel.cmake \ - -DMKL_ROOT="/opt/intel/oneapi/mkl/latest" \ - -DTBB_ROOT="/opt/intel/oneapi/tbb/latest" \ -DLIBAEC_USE_LOCALCONTENT=OFF \ -DZLIB_USE_LOCALCONTENT=OFF \ - -DHDF5_BUILD_FORTRAN=OFF \ $GITHUB_WORKSPACE # BUILD - name: CMake Build shell: bash + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} run: | - source /opt/intel/oneapi/setvars.sh cmake --build . --parallel 3 --config ${{ inputs.build_mode }} working-directory: ${{ runner.workspace }}/build # RUN TESTS - name: CMake Run Tests shell: bash + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} run: | - source /opt/intel/oneapi/setvars.sh - export SYCL_DEVICE_FILTER=opencl.cpu ctest . --parallel 2 -C ${{ inputs.build_mode }} -V working-directory: ${{ runner.workspace }}/build + + Intel_oneapi_windows: + name: "windows-oneapi ${{ inputs.build_mode }}" + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + + - name: Install Dependencies (Windows) + run: choco install ninja + + - name: add oneAPI to env + uses: fortran-lang/setup-fortran@v1 + id: setup-fortran + with: + compiler: intel + version: '2023.2' + + - name: CMake Configure with oneapi + shell: pwsh + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} + run: | + mkdir "${{ runner.workspace }}/build" + Set-Location -Path "${{ runner.workspace }}\\build" + cmake -C ${{ github.workspace }}/config/cmake/cacheinit.cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} -DHDF5_BUILD_FORTRAN=ON -DLIBAEC_USE_LOCALCONTENT=OFF -DZLIB_USE_LOCALCONTENT=OFF ${{ github.workspace }} + + # BUILD + - name: CMake Build + shell: pwsh + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} + run: | + cmake --build . 
--parallel 3 --config ${{ inputs.build_mode }} + working-directory: ${{ runner.workspace }}/build + + # RUN TESTS + - name: CMake Run Tests + shell: pwsh + env: + FC: ${{ steps.setup-fortran.outputs.fc }} + CC: ${{ steps.setup-fortran.outputs.cc }} + CXX: ${{ steps.setup-fortran.outputs.cxx }} + run: | + ctest . --parallel 2 -C ${{ inputs.build_mode }} -V -E tfloatsattrs + working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/main-cmake.yml b/.github/workflows/main-cmake.yml index 0bf383b0dd9..18d4a39584c 100644 --- a/.github/workflows/main-cmake.yml +++ b/.github/workflows/main-cmake.yml @@ -249,3 +249,63 @@ jobs: # # INSTALL (note that this runs even when we don't run the tests) # + + # + # The GitHub runners are inadequate for running parallel HDF5 tests, + # so we catch most issues in daily testing. What we have here is just + # a compile check to make sure nothing obvious is broken. + # A workflow that builds the library + # Parallel Linux (Ubuntu) w/ gcc + Autotools + # + CMake_build_parallel: + name: "Parallel GCC-${{ inputs.build_mode }}-TS=${{ inputs.thread_safety }}" + # Don't run the action if the commit message says to skip CI + if: "!contains(github.event.head_commit.message, 'skip-ci')" + + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # SETUP + - name: Install Linux Dependencies + run: | + sudo apt update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt install gcc-12 g++-12 gfortran-12 + sudo apt install libaec0 libaec-dev + sudo apt install openmpi-bin openmpi-common mpi-default-dev + echo "CC=mpicc" >> $GITHUB_ENV + echo "FC=mpif90" >> $GITHUB_ENV + + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - name: Get Sources + uses: actions/checkout@v4 + + # CMAKE CONFIGURE + - name: CMake Configure + run: | + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + CC=mpicc cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ + -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ + -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} \ + -DBUILD_SHARED_LIBS=ON \ + -DHDF5_ENABLE_ALL_WARNINGS=ON \ + -DHDF5_ENABLE_PARALLEL:BOOL=ON \ + -DHDF5_BUILD_CPP_LIB:BOOL=OFF \ + -DHDF5_BUILD_FORTRAN=ON \ + -DHDF5_BUILD_JAVA=OFF \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ + -DHDF5_ENABLE_MIRROR_VFD:BOOL=OFF \ + -DHDF5_ENABLE_DIRECT_VFD:BOOL=OFF \ + -DHDF5_ENABLE_ROS3_VFD:BOOL=OFF \ + $GITHUB_WORKSPACE + shell: bash + + # BUILD + - name: CMake Build + run: cmake --build . 
--parallel 3 --config ${{ inputs.build_mode }} + working-directory: ${{ runner.workspace }}/build diff --git a/config/cmake-presets/hidden-presets.json b/config/cmake-presets/hidden-presets.json index fad63b7b387..81ac98f2446 100644 --- a/config/cmake-presets/hidden-presets.json +++ b/config/cmake-presets/hidden-presets.json @@ -204,6 +204,28 @@ "ci-GNUC" ] }, + { + "name": "ci-x64-Debug-Intel", + "description": "Intel for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Debug", + "ci-Intel" + ] + }, + { + "name": "ci-x64-Release-Intel", + "description": "Intel for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-x64", + "ci-Release", + "ci-Intel" + ] + }, { "name": "ci-x64-Debug-MSVC-asan", "description": "x64-Debug-MSVC with /fsanitize=address", @@ -265,7 +287,8 @@ "hidden": true, "inherits": [ "ci-base" - ] + ], + "configuration": "Debug" }, { "name": "ci-x64-Release-MSVC", @@ -273,7 +296,8 @@ "hidden": true, "inherits": [ "ci-base" - ] + ], + "configuration": "RelWithDebInfo" }, { "name": "ci-x64-Debug-Clang", @@ -307,6 +331,24 @@ "ci-base" ] }, + { + "name": "ci-x64-Debug-Intel", + "configurePreset": "ci-x64-Debug-Intel", + "hidden": true, + "inherits": [ + "ci-base" + ], + "configuration": "Debug" + }, + { + "name": "ci-x64-Release-Intel", + "configurePreset": "ci-x64-Release-Intel", + "hidden": true, + "inherits": [ + "ci-base" + ], + "configuration": "RelWithDebInfo" + }, { "name": "ci-x64-Debug-MSVC-asan", "configurePreset": "ci-x64-Debug-MSVC-asan", @@ -370,7 +412,8 @@ "hidden": true, "inherits": [ "ci-base" - ] + ], + "configuration": "Debug" }, { "name": "ci-x64-Release-MSVC", @@ -378,7 +421,8 @@ "hidden": true, "inherits": [ "ci-base" - ] + ], + "configuration": "RelWithDebInfo" }, { "name": "ci-x64-Debug-Clang", @@ -412,6 +456,22 @@ "ci-base" ] }, + { + "name": "ci-x64-Debug-Intel", + "configurePreset": "ci-x64-Debug-Intel", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-x64-Release-Intel", + "configurePreset": "ci-x64-Release-Intel", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, { "name": "ci-x64-Debug-MSVC-asan", "configurePreset": "ci-x64-Debug-MSVC-asan", @@ -467,7 +527,8 @@ "inherits": "ci-base", "generators": [ "ZIP" - ] + ], + "configurations": ["Release"] }, { "name": "ci-x64-Release-Clang", @@ -486,6 +547,15 @@ "generators": [ "TGZ" ] + }, + { + "name": "ci-x64-Release-Intel", + "configurePreset": "ci-x64-Release-Intel", + "hidden": true, + "inherits": "ci-base", + "generators": [ + "TGZ" + ] } ] } diff --git a/fortran/examples/ph5example.f90 b/fortran/examples/ph5example.f90 index 138969ea17a..29423b3f547 100644 --- a/fortran/examples/ph5example.f90 +++ b/fortran/examples/ph5example.f90 @@ -24,7 +24,7 @@ PROGRAM DATASET CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name CHARACTER(LEN=100) :: filename ! File name - INTEGER :: fnamelen ! File name length + INTEGER :: fnamelen ! File name length INTEGER(HID_T) :: file_id ! File identifier INTEGER(HID_T) :: dset_id ! Dataset identifier INTEGER(HID_T) :: filespace ! 
Dataspace identifier in file @@ -76,9 +76,9 @@ PROGRAM DATASET CALL get_environment_variable("HDF5_PARAPREFIX", filename) fnamelen = LEN_TRIM(filename) if ( fnamelen == 0 ) then - filename = default_fname + filename = default_fname else - filename = filename(1:fnamelen) // "/" // default_fname + filename = filename(1:fnamelen) // "/" // default_fname endif print *, "Using filename = ", filename diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index 57e17e5a675..d054503f0d6 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -9,12 +9,15 @@ if (WIN32) if (NOT H5_HAVE_PARALLEL) set (H5_NOPAREXP ";") endif () + set (H5_NOSUBFILING ";") # Subfiling is not supported on Windows set (H5_F03EXP ";") if (NOT H5_FORTRAN_C_LONG_DOUBLE_IS_UNIQUE) set (H5_DBL_NOT_UNIQUE_EXP ";") endif () + set (CMAKE_NO_DEPRECATED_SYMBOLS 0) if (NOT H5_NO_DEPRECATED_SYMBOLS) - set (H5_NO_DEPRECATED_SYMBOLS ";") + set (CMAKE_NO_DEPRECATED_SYMBOLS 1) + set (DLL_NO_DEPRECATED_SYMBOLS ";") endif () configure_file (${HDF5_F90_SRC_SOURCE_DIR}/hdf5_fortrandll.def.in ${HDF5_F90_SRC_BINARY_DIR}/hdf5_fortrandll.def @ONLY) endif () @@ -25,8 +28,12 @@ endif () # Define Parallel variable for passing to H5config_f.inc.cmake set (CMAKE_H5_HAVE_PARALLEL 0) +set (CMAKE_H5_HAVE_SUBFILING_VFD 0) if (H5_HAVE_PARALLEL) set (CMAKE_H5_HAVE_PARALLEL 1) + if (H5_HAVE_SUBFILING_VFD) + set (CMAKE_H5_HAVE_SUBFILING_VFD 1) + endif () endif () set (CMAKE_H5_HAVE_FLOAT128 0) @@ -341,8 +348,6 @@ if (BUILD_STATIC_LIBS) ${LINK_Fortran_LIBS} "$<$:${MPI_Fortran_LIBRARIES}>" ) -# set_property(TARGET ${HDF5_F90_LIB_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-SUBSYSTEM:CONSOLE>") -# set_property(TARGET ${HDF5_F90_LIB_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:${WIN_LINK_FLAGS}>") if(MSVC) set_property(TARGET ${HDF5_F90_LIB_TARGET} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() @@ -358,7 +363,12 @@ if (BUILD_STATIC_LIBS) endif () if (BUILD_SHARED_LIBS) - add_library (${HDF5_F90_LIBSH_TARGET} SHARED ${f90_F_SOURCES_SHARED}) + if(WIN32) + set(DLLDEF ${HDF5_F90_SRC_BINARY_DIR}/hdf5_fortrandll.def) + else() + set(DLLDEF "") + endif() + add_library (${HDF5_F90_LIBSH_TARGET} SHARED ${DLLDEF} ${f90_F_SOURCES_SHARED}) target_include_directories (${HDF5_F90_LIBSH_TARGET} PRIVATE "${HDF5_F90_SRC_SOURCE_DIR};${CMAKE_Fortran_MODULE_DIRECTORY}/shared;${HDF5_F90_BINARY_DIR};$<$:${MPI_Fortran_INCLUDE_DIRS}>" INTERFACE "$/${HDF5_INSTALL_MODULE_DIR}/shared>" @@ -374,12 +384,8 @@ if (BUILD_SHARED_LIBS) PUBLIC ${HDF5_F90_C_LIBSH_TARGET} PRIVATE ${LINK_Fortran_LIBS} "$<$:${MPI_Fortran_LIBRARIES}>" ) -# set_property(TARGET ${HDF5_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-SUBSYSTEM:CONSOLE>") -# set_property(TARGET ${HDF5_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:${WIN_LINK_FLAGS}>") -# set_property(TARGET ${HDF5_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-DLL>") -# set_property(TARGET ${HDF5_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-DEF:${HDF5_F90_SRC_BINARY_DIR}/hdf5_fortrandll.def>") if(MSVC) - set_property(TARGET ${HDF5_F90_LIBSH_TARGET} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS} /DLL /DEF:${HDF5_F90_SRC_BINARY_DIR}/hdf5_fortrandll.def") + set_property(TARGET ${HDF5_F90_LIBSH_TARGET} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() set_target_properties (${HDF5_F90_LIBSH_TARGET} PROPERTIES FOLDER libraries/fortran diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index 576509534ae..afd17f3dc32 100644 --- a/fortran/src/H5Pff.F90 +++ 
b/fortran/src/H5Pff.F90 @@ -5342,7 +5342,7 @@ END SUBROUTINE h5pget_fapl_ioc_f !! \brief Retrieves local and global causes that broke collective I/O on the last parallel I/O call. !! !! \param plist_id Dataset transfer property list identifier -!! \param local_no_collective_cause An enumerated set value indicating the causes that prevented collective I/O in the local process +!! \param local_no_collective_cause An enumerated set value indicating the causes that prevented collective I/O in the local process !! \param global_no_collective_cause An enumerated set value indicating the causes across all processes that prevented collective I/O !! \param hdferr \fortran_error !! diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake index 71bce0e18c2..0f274dbcbd4 100644 --- a/fortran/src/H5config_f.inc.cmake +++ b/fortran/src/H5config_f.inc.cmake @@ -12,33 +12,39 @@ ! fortran/src/H5config_f.inc. Generated from fortran/src/H5config_f.inc.in by configure ! Define if there is parallel support -#cmakedefine01 H5_HAVE_PARALLEL -#if H5_HAVE_PARALLEL == 0 +#cmakedefine01 CMAKE_H5_HAVE_PARALLEL +#if CMAKE_H5_HAVE_PARALLEL == 0 #undef H5_HAVE_PARALLEL +#else +#define H5_HAVE_PARALLEL #endif ! Define if there is subfiling support -#cmakedefine01 H5_HAVE_SUBFILING_VFD -#if H5_HAVE_SUBFILING_VFD == 0 +#cmakedefine01 CMAKE_H5_HAVE_SUBFILING_VFD +#if CMAKE_H5_HAVE_SUBFILING_VFD == 0 #undef H5_HAVE_SUBFILING_VFD +#else +#define H5_HAVE_SUBFILING_VFD #endif ! Define if on APPLE #cmakedefine01 H5_HAVE_DARWIN #if H5_HAVE_DARWIN == 0 #undef H5_HAVE_DARWIN +#else +#define H5_HAVE_DARWIN #endif ! Define if the intrinsic function STORAGE_SIZE exists -#define H5_FORTRAN_HAVE_STORAGE_SIZE @H5_FORTRAN_HAVE_STORAGE_SIZE@ +#cmakedefine H5_FORTRAN_HAVE_STORAGE_SIZE @H5_FORTRAN_HAVE_STORAGE_SIZE@ ! Define if the intrinsic function SIZEOF exists -#define H5_FORTRAN_HAVE_SIZEOF @H5_FORTRAN_HAVE_SIZEOF@ +#cmakedefine H5_FORTRAN_HAVE_SIZEOF @H5_FORTRAN_HAVE_SIZEOF@ ! Define if the intrinsic function C_SIZEOF exists -#define H5_FORTRAN_HAVE_C_SIZEOF @H5_FORTRAN_HAVE_C_SIZEOF@ +#cmakedefine H5_FORTRAN_HAVE_C_SIZEOF @H5_FORTRAN_HAVE_C_SIZEOF@ -! Define if the intrinsic C_LONG_DOUBLE exists +! Define if the intrinsic function C_LONG_DOUBLE exists #define H5_FORTRAN_HAVE_C_LONG_DOUBLE @H5_FORTRAN_HAVE_C_LONG_DOUBLE@ ! Define if Fortran C_LONG_DOUBLE is different from C_DOUBLE @@ -47,17 +53,17 @@ ! Define if the intrinsic module ISO_FORTRAN_ENV exists #define H5_HAVE_ISO_FORTRAN_ENV @H5_HAVE_ISO_FORTRAN_ENV@ -! should this be ${HDF_PREFIX} instead of H5 MSB +! Define the size of C's double #define H5_SIZEOF_DOUBLE @H5_SIZEOF_DOUBLE@ -! should this be ${HDF_PREFIX} instead of H5 MSB +! Define the size of C's long double #define H5_SIZEOF_LONG_DOUBLE @H5_SIZEOF_LONG_DOUBLE@ ! Define the maximum decimal precision for reals #define H5_PAC_FC_MAX_REAL_PRECISION @H5_PAC_FC_MAX_REAL_PRECISION@ ! If C has quad precision -#define H5_HAVE_FLOAT128 @H5_HAVE_FLOAT128@ +#cmakedefine H5_HAVE_FLOAT128 @H5_HAVE_FLOAT128@ ! Define if INTEGER*16 is available #define H5_HAVE_Fortran_INTEGER_SIZEOF_16 @H5_HAVE_Fortran_INTEGER_SIZEOF_16@ @@ -84,7 +90,9 @@ #define H5_Fortran_COMPILER_ID @CMAKE_Fortran_COMPILER_ID@ ! 
Define if deprecated public API symbols are disabled -#cmakedefine01 H5_NO_DEPRECATED_SYMBOLS -#if H5_NO_DEPRECATED_SYMBOLS == 0 +#cmakedefine01 CMAKE_NO_DEPRECATED_SYMBOLS +#if CMAKE_NO_DEPRECATED_SYMBOLS == 0 #undef H5_NO_DEPRECATED_SYMBOLS +#else +#define H5_NO_DEPRECATED_SYMBOLS #endif diff --git a/fortran/src/H5config_f.inc.in b/fortran/src/H5config_f.inc.in index 991e4b0750b..3aeded931d9 100644 --- a/fortran/src/H5config_f.inc.in +++ b/fortran/src/H5config_f.inc.in @@ -41,7 +41,7 @@ ! Define if the intrinsic module ISO_FORTRAN_ENV exists #undef HAVE_ISO_FORTRAN_ENV -! Define the size of C's long double +! Define the size of C's double #undef SIZEOF_DOUBLE ! Define the size of C's long double diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index a8399a38ca7..55f4f2b579b 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -16,7 +16,7 @@ H5A_mp_H5AREAD_CHAR_SCALAR H5A_mp_H5ACREATE_F H5A_mp_H5ACREATE_ASYNC_F H5A_mp_H5AOPEN_NAME_F -@H5_NO_DEPRECATED_SYMBOLS@H5A_mp_H5AOPEN_IDX_F +@DLL_NO_DEPRECATED_SYMBOLS@H5A_mp_H5AOPEN_IDX_F H5A_mp_H5AGET_SPACE_F H5A_mp_H5AGET_TYPE_F H5A_mp_H5AGET_NAME_F @@ -253,7 +253,9 @@ H5O_mp_H5OVISIT_BY_NAME_F H5O_mp_H5OVISIT_F H5O_mp_H5OTOKEN_CMP_F ; H5P +H5P_mp_H5PCLOSE_F H5P_mp_H5PCREATE_F +H5P_mp_H5PCREATE_CLASS_F H5P_mp_H5PSET_PRESERVE_F H5P_mp_H5PGET_PRESERVE_F H5P_mp_H5PGET_CLASS_F @@ -419,10 +421,10 @@ H5P_mp_H5PGET_ACTUAL_SELECTION_IO_MODE_F ; Parallel @H5_NOPAREXP@H5P_mp_H5PSET_FAPL_MPIO_F @H5_NOPAREXP@H5P_mp_H5PGET_FAPL_MPIO_F -@H5_NOPAREXP@H5P_mp_H5PSET_FAPL_SUBFILING_F -@H5_NOPAREXP@H5P_mp_H5PGET_FAPL_SUBFILING_F -@H5_NOPAREXP@H5P_mp_H5PSET_FAPL_IOC_F -@H5_NOPAREXP@H5P_mp_H5PGET_FAPL_IOC_F +@H5_NOPAREXP@@H5_NOSUBFILING@H5P_mp_H5PSET_FAPL_SUBFILING_F +@H5_NOPAREXP@@H5_NOSUBFILING@H5P_mp_H5PGET_FAPL_SUBFILING_F +@H5_NOPAREXP@@H5_NOSUBFILING@H5P_mp_H5PSET_FAPL_IOC_F +@H5_NOPAREXP@@H5_NOSUBFILING@H5P_mp_H5PGET_FAPL_IOC_F @H5_NOPAREXP@H5P_mp_H5PSET_MPI_PARAMS_F @H5_NOPAREXP@H5P_mp_H5PGET_MPI_PARAMS_F @H5_NOPAREXP@H5P_mp_H5PSET_DXPL_MPIO_F diff --git a/fortran/test/tH5T_F03.F90 b/fortran/test/tH5T_F03.F90 index 39845971644..08dfa226168 100644 --- a/fortran/test/tH5T_F03.F90 +++ b/fortran/test/tH5T_F03.F90 @@ -984,7 +984,7 @@ SUBROUTINE test_h5kind_to_type(total_error) INTEGER, PARAMETER :: real_kind_15 = C_DOUBLE !should map to REAL*8 on most modern processors ! Check if C has quad precision extension -#if H5_HAVE_FLOAT128!=0 +#ifdef H5_HAVE_FLOAT128 ! Check if Fortran supports quad precision # if H5_PAC_FC_MAX_REAL_PRECISION > 26 INTEGER, PARAMETER :: real_kind_31 = SELECTED_REAL_KIND(31) @@ -3400,7 +3400,7 @@ SUBROUTINE multiple_dset_rw(total_error) !------------------------------------------------------------------------- ! Subroutine: multiple_dset_rw ! -! Purpose: Tests the reading and writing of multiple datasets +! Purpose: Tests the reading and writing of multiple datasets ! using H5Dread_multi and H5Dwrite_multi ! ! Return: Success: 0 @@ -3408,10 +3408,10 @@ SUBROUTINE multiple_dset_rw(total_error) !------------------------------------------------------------------------- ! IMPLICIT NONE - + INTEGER, INTENT(INOUT) :: total_error ! number of errors INTEGER :: error ! HDF hdferror flag - + INTEGER(SIZE_T), PARAMETER :: ndset = 5 ! Number of data sets INTEGER(HID_T), DIMENSION(:), ALLOCATABLE :: dset_id INTEGER(HID_T), DIMENSION(:), ALLOCATABLE :: mem_type_id @@ -3424,9 +3424,9 @@ SUBROUTINE multiple_dset_rw(total_error) INTEGER, PARAMETER :: sdim=2 ! 
length of character string INTEGER, PARAMETER :: ddim=2 ! size of derived type array INTEGER :: i,j,k - + TYPE(C_PTR), ALLOCATABLE, DIMENSION(:) :: buf_md ! array to hold the multi-datasets - + INTEGER, DIMENSION(1:idim), TARGET :: wbuf_int ! integer write buffer INTEGER, DIMENSION(1:idim,idim2,idim3), TARGET :: wbuf_intmd REAL, DIMENSION(1:rdim), TARGET :: wbuf_real ! real write buffer @@ -3535,7 +3535,7 @@ SUBROUTINE multiple_dset_rw(total_error) CALL check("h5tinsert_f", error, total_error) CALL h5tcopy_f(H5T_NATIVE_CHARACTER, strtype, error) CALL check("h5tcopy_f", error, total_error) - CALL h5tset_size_f(strtype, INT(sdim,size_t), error) + CALL h5tset_size_f(strtype, INT(sdim,size_t), error) CALL check("h5tset_size_f", error, total_error) CALL h5tinsert_f(mem_type_id(4), "chr", & H5OFFSETOF(C_LOC(wbuf_derived(1)),C_LOC(wbuf_derived(1)%c(1:1))), strtype, error) diff --git a/hl/fortran/src/CMakeLists.txt b/hl/fortran/src/CMakeLists.txt index ed469e57753..f225eb8f3b4 100644 --- a/hl/fortran/src/CMakeLists.txt +++ b/hl/fortran/src/CMakeLists.txt @@ -22,9 +22,6 @@ if (WIN32 AND MSVC) set_target_properties (H5HL_buildiface PROPERTIES COMPILE_FLAGS "/MT") endif () endif () -#set_property(TARGET H5HL_buildiface APPEND PROPERTY -# LINK_FLAGS "$<$:-SUBSYSTEM:CONSOLE>" -#) if(MSVC) set_property(TARGET H5HL_buildiface PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE") endif() @@ -182,8 +179,6 @@ if (BUILD_STATIC_LIBS) PRIVATE "$<$:${WIN_COMPILE_FLAGS}>" ) target_link_libraries (${HDF5_HL_F90_LIB_TARGET} PUBLIC ${HDF5_HL_F90_C_LIB_TARGET} ${HDF5_F90_LIB_TARGET}) -# set_property(TARGET ${HDF5_HL_F90_LIB_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-SUBSYSTEM:CONSOLE>") -# set_property(TARGET ${HDF5_HL_F90_LIB_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:${WIN_LINK_FLAGS}>") if(MSVC) set_property(TARGET ${HDF5_HL_F90_LIB_TARGET} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() @@ -198,7 +193,12 @@ if (BUILD_STATIC_LIBS) add_dependencies(${HDF5_HL_F90_LIB_TARGET} H5HLgen) endif () if (BUILD_SHARED_LIBS) - add_library (${HDF5_HL_F90_LIBSH_TARGET} SHARED ${HDF5_HL_F90_F_SOURCES_SHARED}) + if(WIN32) + set(DLLDEF ${HDF5_HL_F90_SRC_BINARY_DIR}/hdf5_hl_fortrandll.def) + else() + set(DLLDEF "") + endif() + add_library (${HDF5_HL_F90_LIBSH_TARGET} SHARED ${DLLDEF} ${HDF5_HL_F90_F_SOURCES_SHARED}) target_include_directories (${HDF5_HL_F90_LIBSH_TARGET} PRIVATE "${HDF5_F90_BINARY_DIR};${CMAKE_Fortran_MODULE_DIRECTORY}/shared;$<$:${MPI_Fortran_INCLUDE_DIRS}>" INTERFACE "$/${HDF5_INSTALL_MODULE_DIR}/shared>" @@ -214,12 +214,8 @@ if (BUILD_SHARED_LIBS) PUBLIC ${HDF5_HL_F90_C_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} PRIVATE ${LINK_Fortran_LIBS} ) -# set_property(TARGET ${HDF5_HL_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-SUBSYSTEM:CONSOLE>") -# set_property(TARGET ${HDF5_HL_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:${WIN_LINK_FLAGS}>") -# set_property(TARGET ${HDF5_HL_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-DLL>") -# set_property(TARGET ${HDF5_HL_F90_LIBSH_TARGET} APPEND PROPERTY LINK_FLAGS "$<$:-DEF:${HDF5_HL_F90_SRC_BINARY_DIR}/hdf5_hl_fortrandll.def>") if(MSVC) - set_property(TARGET ${HDF5_HL_F90_LIBSH_TARGET} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS} -DLL -DEF:${HDF5_HL_F90_SRC_BINARY_DIR}/hdf5_hl_fortrandll.def") + set_property(TARGET ${HDF5_HL_F90_LIBSH_TARGET} PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}") endif() set_target_properties (${HDF5_HL_F90_LIBSH_TARGET} PROPERTIES FOLDER libraries/hl/fortran diff --git a/hl/fortran/src/H5LTff.F90 
b/hl/fortran/src/H5LTff.F90 index 8b4e6d369ea..80e9fe7e350 100644 --- a/hl/fortran/src/H5LTff.F90 +++ b/hl/fortran/src/H5LTff.F90 @@ -1115,7 +1115,7 @@ SUBROUTINE h5ltset_attribute_int_f(loc_id,& f_ptr = C_LOC(buf(1:1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf_type = STORAGE_SIZE(buf(1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf_type = SIZEOF(buf(1)) @@ -1165,7 +1165,7 @@ SUBROUTINE h5ltset_attribute_float_f(loc_id,& INTEGER(size_t) :: SizeOf_buf_type f_ptr = C_LOC(buf(1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf_type = STORAGE_SIZE(buf(1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf_type = SIZEOF(buf(1)) @@ -1216,7 +1216,7 @@ SUBROUTINE h5ltset_attribute_double_f(loc_id,& f_ptr = C_LOC(buf(1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf_type = STORAGE_SIZE(buf(1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf_type = SIZEOF(buf(1)) @@ -1264,7 +1264,7 @@ SUBROUTINE h5ltset_attribute_string_f(loc_id,& f_ptr = C_LOC(buf(1)(1:1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf_type = STORAGE_SIZE(buf(1)(1:1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf_type = SIZEOF(buf(1:1)(1:1)) @@ -1363,7 +1363,7 @@ SUBROUTINE h5ltget_attribute_int_f(loc_id,& f_ptr = C_LOC(buf(1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf = STORAGE_SIZE(buf(1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf = SIZEOF(buf(1)) @@ -1407,7 +1407,7 @@ SUBROUTINE h5ltget_attribute_float_f(loc_id,& INTEGER(size_t) :: SizeOf_buf f_ptr = C_LOC(buf(1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf = STORAGE_SIZE(buf(1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf = SIZEOF(buf(1)) @@ -1451,7 +1451,7 @@ SUBROUTINE h5ltget_attribute_double_f(loc_id,& INTEGER(size_t) :: SizeOf_buf f_ptr = C_LOC(buf(1)) -#if H5_FORTRAN_HAVE_STORAGE_SIZE!=0 +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE SizeOf_buf = STORAGE_SIZE(buf(1), c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t) #else SizeOf_buf = SIZEOF(buf(1)) From afb1f3c19ace049faa1404b312b4ff44560a03f6 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Fri, 10 Nov 2023 11:14:07 -0600 Subject: [PATCH 092/101] Remove printf format warning on Windows oneAPI. 
(#3838) --- test/accum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/accum.c b/test/accum.c index 5a19efeebf5..9876998d9b4 100644 --- a/test/accum.c +++ b/test/accum.c @@ -2194,7 +2194,7 @@ test_swmr_write_big(bool newest_format) ZeroMemory(&pi, sizeof(pi)); if (0 == CreateProcess(NULL, SWMR_READER, NULL, NULL, false, 0, NULL, NULL, &si, &pi)) { - printf("CreateProcess failed (%d).\n", GetLastError()); + printf("CreateProcess failed (%lu).\n", GetLastError()); FAIL_STACK_ERROR; } From 5e0b59a70cc093b6ce17820a78de410201867226 Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Fri, 10 Nov 2023 22:34:46 -0600 Subject: [PATCH 093/101] Test and document path handling of H5Lcreate_* API (#3829) --- src/H5Lpublic.h | 27 ++++-- test/links.c | 250 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 270 insertions(+), 7 deletions(-) diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index 03b47c540fa..2bf3c53b83e 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -260,9 +260,13 @@ H5_DLL herr_t H5Lcopy(hid_t src_loc, const char *src_name, hid_t dst_loc, const * location and name, respectively, of the new hard link. * * \p cur_name and \p dst_name are interpreted relative to \p cur_loc - * and \p dst_loc, respectively. If \p cur_loc and \p dst_loc are the - * same location, the HDF5 macro #H5L_SAME_LOC can be used for either - * parameter (but not both). + * and \p dst_loc, respectively. If a given name begins with \c /, + * then it will be interpreted as absolute path in the file. + * The names of the created links will be the last element of + * each provided path. Prior elements in each path are used to + * locate the parent groups of each new link. If \p cur_loc and + * \p dst_loc are the same location, the HDF5 macro + * #H5L_SAME_LOC can be used for either parameter (but not both). * * \p lcpl_id and \p lapl_id are the link creation and access property * lists associated with the new link. @@ -321,8 +325,10 @@ H5_DLL herr_t H5Lcreate_hard_async(hid_t cur_loc_id, const char *cur_name, hid_t * * \p link_loc_id and \p link_name specify the location and name, * respectively, of the new soft link. \p link_name is interpreted - * relative to \p link_loc_id and must contain only the name of the soft - * link; \p link_name may not contain any additional path elements. + * as a path relative to \p link_loc_id, or an absolute path if it + * begins with \c /. The name of the created link will be the last + * element of the provided path. Prior elements in the path are + * used to locate the parent group of the new link. * * If \p link_loc_id is a group identifier, the object pointed to by * \p link_name will be accessed as a member of that group. If @@ -1190,7 +1196,11 @@ H5_DLL herr_t H5Lvisit_by_name2(hid_t loc_id, const char *group_name, H5_index_t * named \p link_name at the location specified in \p link_loc_id with * user-specified data \p udata. * - * \p link_name is interpreted relative to \p link_loc_id. + * \p link_name is interpreted relative to \p link_loc_id. If + * \p link_name begins with \c /, then it will be interpreted as + * an absolute path in the file. The name of the created link + * will be the last element of the provided path. Prior elements + * in the path are used to locate the parent group of the new link. 
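(A condensed sketch of the path handling documented above, mirroring the new link_path_handling test added further down in test/links.c. The file and group names are illustrative and error checking is omitted for brevity.)

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t file_id = H5Fcreate("path_handling.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        hid_t grp1    = H5Gcreate2(file_id, "grp1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        hid_t grp2    = H5Gcreate2(grp1, "grp2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* Relative path: interpreted against grp1, so the new link named
         * "rel_hard" is created inside /grp1/grp2 */
        H5Lcreate_hard(file_id, "grp1", grp1, "grp2/rel_hard", H5P_DEFAULT, H5P_DEFAULT);

        /* Absolute path: the leading '/' resolves from the file's root group;
         * the link name is the last element ("abs_soft") and the prior
         * elements locate its parent group, /grp1/grp2 */
        H5Lcreate_soft("/grp1", grp1, "/grp1/grp2/abs_soft", H5P_DEFAULT, H5P_DEFAULT);

        H5Gclose(grp2);
        H5Gclose(grp1);
        H5Fclose(file_id);
        return 0;
    }
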
* * Valid values for the link class of the new link, \p link_type, * include #H5L_TYPE_EXTERNAL and any user-defined link classes that @@ -1307,7 +1317,10 @@ H5_DLL herr_t H5Lunpack_elink_val(const void *ext_linkval /*in*/, size_t link_si * * \p link_loc_id and \p link_name specify the location and name, * respectively, of the new link. \p link_name is interpreted relative - * to \p link_loc_id. + * to \p link_loc_id. If \p link_name begins with \c /, then it is + * interpreted as an absolute path in the file. The name of the created + * link will be the last element of the provided path. Prior elements in + * the path are used to locate the parent group of the new link. * * \p lcpl_id is the link creation property list used in creating the * new link. diff --git a/test/links.c b/test/links.c index 99e011402c0..e068d71be8e 100644 --- a/test/links.c +++ b/test/links.c @@ -22623,6 +22623,253 @@ timestamps(hid_t fapl) return FAIL; } /* end timestamps() */ +/*------------------------------------------------------------------------- + * Function: link_path_handling + * + * Purpose: Create hard and soft links by relative and absolute paths + * + * Return: Success: 0 + * Failure: -1 + *------------------------------------------------------------------------- + */ +static int +link_path_handling(hid_t fapl, bool new_format) +{ + + hid_t file_id = (H5I_INVALID_HID); + hid_t grp1 = (H5I_INVALID_HID), grp2 = (H5I_INVALID_HID); + char filename[NAME_BUF_SIZE]; + + if (new_format) + TESTING("H5Lcreate path handling (w/new group format)"); + else + TESTING("H5Lcreate path handling"); + + /* Create file */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR; + + /* Create two groups */ + if ((grp1 = H5Gcreate2(file_id, "grp1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + if ((grp2 = H5Gcreate2(grp1, "grp2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Create hard link to grp1 that resides in grp2 by relative path */ + if (H5Lcreate_hard(file_id, "grp1", grp1, "grp2/relative_hard_link", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "relative_hard_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Create soft link to grp1 that resides in grp2 by relative path */ + if (H5Lcreate_soft("/grp1", grp1, "grp2/relative_soft_link", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "relative_soft_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Create hard link to grp1 that resides in grp2 by absolute path */ + if (H5Lcreate_hard(file_id, "grp1", grp1, "/grp1/grp2/absolute_hard_link", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "absolute_hard_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Create soft link to grp1 that resides in grp2 by absolute path */ + if (H5Lcreate_soft("/grp1", grp1, "/grp1/grp2/absolute_soft_link", H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "absolute_soft_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Close groups and file */ + if (H5Gclose(grp1) < 0) + TEST_ERROR; + if (H5Gclose(grp2) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Gclose(grp1); + H5Gclose(grp2); + H5Fclose(file_id); + } + H5E_END_TRY + return FAIL; +} + +/*------------------------------------------------------------------------- + * Function: ext_link_path_handling + * + * Purpose: Create external links by relative and 
absolute paths + * + * Return: Success: 0 + * Failure: -1 + *------------------------------------------------------------------------- + */ +static int +ext_link_path_handling(hid_t fapl, bool new_format) +{ + + hid_t file_id = (H5I_INVALID_HID); + hid_t grp1 = (H5I_INVALID_HID), grp2 = (H5I_INVALID_HID); + char filename[NAME_BUF_SIZE]; + + if (new_format) + TESTING("H5Lcreate external link path handling (w/new group format)"); + else + TESTING("H5Lcreate external link path handling"); + + /* Create file */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR; + + /* Create two groups */ + if ((grp1 = H5Gcreate2(file_id, "grp1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + if ((grp2 = H5Gcreate2(grp1, "grp2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Create external link to nonexistent object by relative path */ + if (H5Lcreate_external("nonexistent_file", "nonexistent_object", grp1, "grp2/relative_ext_link", + H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "relative_ext_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Create external link to nonexistent object by absolute path */ + if (H5Lcreate_external("nonexistent_file", "nonexistent_object", grp1, "/grp1/grp2/relative_soft_link", + H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "relative_soft_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Close groups and file */ + if (H5Gclose(grp1) < 0) + TEST_ERROR; + if (H5Gclose(grp2) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + + PASSED(); + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Gclose(grp1); + H5Gclose(grp2); + H5Fclose(file_id); + } + H5E_END_TRY + return FAIL; +} + +/*------------------------------------------------------------------------- + * Function: ud_link_path_handling + * + * Purpose: Create user-defined links by relative and absolute paths + * + * Return: Success: 0 + * Failure: -1 + *------------------------------------------------------------------------- + */ +static int +ud_link_path_handling(hid_t fapl, bool new_format) +{ + + hid_t file_id = (H5I_INVALID_HID); + hid_t grp1 = (H5I_INVALID_HID), grp2 = (H5I_INVALID_HID); + char filename[NAME_BUF_SIZE]; + H5L_info2_t li; + + if (new_format) + TESTING("H5Lcreate ud link path handling (w/new group format)"); + else + TESTING("H5Lcreate ud link path handling"); + + /* Create file */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR; + + /* Create two groups */ + if ((grp1 = H5Gcreate2(file_id, "grp1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + if ((grp2 = H5Gcreate2(grp1, "grp2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Check that UD hard links are not registered */ + if (H5Lis_registered((H5L_type_t)UD_HARD_TYPE) != false) + TEST_ERROR; + + /* Register "user-defined hard links" with the library */ + if (H5Lregister(UD_hard_class) < 0) + TEST_ERROR; + + /* Check that UD hard links are registered */ + if (H5Lis_registered((H5L_type_t)UD_HARD_TYPE) != true) + TEST_ERROR; + + if (H5Lget_info2(file_id, "grp1", &li, H5P_DEFAULT) < 0) + TEST_ERROR; + + /* Create user-defined (hard) link to grp1 by relative path */ + if (H5Lcreate_ud(grp1, "grp2/relative_ud_link", (H5L_type_t)UD_HARD_TYPE, &(li.u.token), + sizeof(li.u.token), H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if 
(H5Lexists(grp2, "relative_ud_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Create user-defined (hard) link to grp1 by absolute path */ + if (H5Lcreate_ud(grp1, "/grp1/grp2/absolute_ud_link", (H5L_type_t)UD_HARD_TYPE, &(li.u.token), + sizeof(li.u.token), H5P_DEFAULT, H5P_DEFAULT) < 0) + TEST_ERROR; + + if (H5Lexists(grp2, "absolute_ud_link", H5P_DEFAULT) <= 0) + TEST_ERROR; + + /* Close groups and file */ + if (H5Gclose(grp1) < 0) + TEST_ERROR; + if (H5Gclose(grp2) < 0) + TEST_ERROR; + if (H5Fclose(file_id) < 0) + TEST_ERROR; + if (H5Lunregister((H5L_type_t)UD_HARD_TYPE) < 0) + TEST_ERROR; + + PASSED(); + return SUCCEED; + +error: + H5E_BEGIN_TRY + { + H5Gclose(grp1); + H5Gclose(grp2); + H5Fclose(file_id); + H5Lunregister((H5L_type_t)UD_HARD_TYPE); + } + H5E_END_TRY + return FAIL; +} + /*------------------------------------------------------------------------- * Function: main * @@ -22697,6 +22944,7 @@ main(void) nerrors += ck_new_links(my_fapl, new_format) < 0 ? 1 : 0; nerrors += long_links(my_fapl, new_format) < 0 ? 1 : 0; nerrors += toomany(my_fapl, new_format) < 0 ? 1 : 0; + nerrors += link_path_handling(my_fapl, new_format) < 0 ? 1 : 0; /* Test new H5L link creation routine */ nerrors += test_lcpl(my_fapl, new_format); @@ -22804,6 +23052,7 @@ main(void) nerrors += external_open_twice(my_fapl, new_format) < 0 ? 1 : 0; nerrors += external_link_with_committed_datatype(my_fapl, new_format) < 0 ? 1 : 0; nerrors += external_link_public_macros(my_fapl, new_format) < 0 ? 1 : 0; + nerrors += ext_link_path_handling(my_fapl, new_format) < 0 ? 1 : 0; } /* with/without external file cache */ } @@ -22826,6 +23075,7 @@ main(void) nerrors += ud_callbacks_deprec(my_fapl, new_format) < 0 ? 1 : 0; #endif /* H5_NO_DEPRECATED_SYMBOLS */ nerrors += ud_link_errors(my_fapl, new_format) < 0 ? 1 : 0; + nerrors += ud_link_path_handling(my_fapl, new_format) < 0 ? 1 : 0; nerrors += lapl_udata(my_fapl, new_format) < 0 ? 1 : 0; nerrors += lapl_nlinks(my_fapl, new_format) < 0 ? 
1 : 0; #ifndef H5_NO_DEPRECATED_SYMBOLS From a6e610eb639f05da58246e1ccaa55cec4f679008 Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Fri, 10 Nov 2023 22:36:06 -0600 Subject: [PATCH 094/101] Correct ENV variables (#3841) --- config/cmake/examples/HDF5_Examples.cmake.in | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/config/cmake/examples/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in index 2f3a6491a90..9c997191630 100644 --- a/config/cmake/examples/HDF5_Examples.cmake.in +++ b/config/cmake/examples/HDF5_Examples.cmake.in @@ -53,7 +53,7 @@ set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=${ if(NOT DEFINED INSTALLDIR) if(WIN32) - set(INSTALLDIR "%ProgramFiles%/HDF_Group/@HDF5_PACKAGE_NAME@/@HDF5_PACKAGE_VERSION@") + set(INSTALLDIR "\"%ProgramFiles%/HDF_Group/@HDF5_PACKAGE_NAME@/@HDF5_PACKAGE_VERSION@\"") else() set(INSTALLDIR "@CMAKE_INSTALL_PREFIX@") endif() @@ -78,6 +78,7 @@ set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDN ############################################################################################################### set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ROOT:PATH=${INSTALLDIR}") +set (ENV{HDF5_ROOT} "${INSTALLDIR}") if(WIN32) set(SITE_OS_NAME "Windows") set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build) @@ -98,11 +99,11 @@ endif() set(ENV{HDF5_PLUGIN_PATH} "${INSTALLDIR}/lib/plugin") set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@") ### use a toolchain file (supported everywhere) #### -if(NOT DEFINED CTEST_TOOLCHAIN_FILE) - set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=@CTEST_TOOLCHAIN_FILE@") -else() - set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=${CTEST_TOOLCHAIN_FILE}") -endif() +#if(NOT DEFINED CTEST_TOOLCHAIN_FILE) +# set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=@CTEST_TOOLCHAIN_FILE@") +#else() +# set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=${CTEST_TOOLCHAIN_FILE}") +#endif() ############################################################################################################### # For any comments please contact cdashhelp@hdfgroup.org From 0bf85494c1487a1e9292128d09ac4bc9848543d9 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 13 Nov 2023 10:22:15 -0800 Subject: [PATCH 095/101] Remove 1.12 from README.md (#3847) Also remove 1.14.3 info since that was already released. 
--- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 043351e2316..28d8d7a1b4d 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,6 @@ HDF5 version 1.15.0 currently under development [![h5py build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/h5py.yml?branch=develop&label=h5py)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) [![CVE regression](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cve.yml?branch=develop&label=CVE)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) [![1.14 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_14&label=1.14)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_14) -[![1.12 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_12&label=1.12)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_12) [![BSD](https://img.shields.io/badge/License-BSD-blue.svg)](https://github.com/HDFGroup/hdf5/blob/develop/COPYING) [HPC configure/build/test results](https://my.cdash.org/index.php?project=HDF5) @@ -87,8 +86,6 @@ are tentative. | Release | New Features | | ------- | ------------ | -| 1.12.3 | CVE fixes, performance improvements, H5Dchunk\_iter(), last HDF5 1.12 release | -| 1.14.3 | CVE-free!, better cross-compile support | | 1.14.4 | S3 VFD improvements | | TBD | VFD SWMR | | 2.0.0 | TBD | From 8b3ffdef3099d2699ec71a5f855966132b3d3c25 Mon Sep 17 00:00:00 2001 From: Dana Robinson <43805+derobins@users.noreply.github.com> Date: Mon, 13 Nov 2023 11:09:43 -0800 Subject: [PATCH 096/101] Remove Autotools sed hack (#3848) configure.ac contains a sed line that cleans up incorrect library flags which was added to paper over some bugs in earlier versions of the Autotools. These issues are not a problem with the current versions of the Autootols. The sed line causes problems on MacOS, so it has been removed. Fixes #3843 --- configure.ac | 6 ------ release_docs/RELEASE.txt | 10 ++++++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/configure.ac b/configure.ac index 30f5d2caeaa..eace890ffae 100644 --- a/configure.ac +++ b/configure.ac @@ -4285,11 +4285,5 @@ if test "X$HDF_FORTRAN" = "Xyes"; then esac fi -## HDF5 configure code created by autotools with gcc 4.9.2 is adding problematic -## linker flags: -l with no library name; -l , specifically gfortran or m. -## This sed script corrects "-l " first and then "-l " with no library name. -## If the order is not preserved, all instances of "-l " will be removed. -sed -e '/^postdeps/ s/-l \([a-zA-Z]\)/-l\1/g' -e '/^postdeps/ s/-l //g' -i libtool - ## show the configure settings cat src/libhdf5.settings diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index 145457400bc..66355a0d0e5 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -835,6 +835,16 @@ Bug Fixes since HDF5-1.14.0 release Configuration ------------- + - Removed an Autotools configure hack that causes problems on MacOS + + A sed line in configure.ac was added in the past to paper over some + problems with older versions of the Autotools that would add incorrect + linker flags. This hack is not needed with recent versions of the + Autotools and the sed line errors on MacOS (though this was a silent + error that didn't break the build) so the hack has been removed. 
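      (For instance, the removed sed rewrote a broken libtool postdeps value
      such as "-l gfortran -l m -l " -- a stray space after each -l plus a
      dangling -l -- into "-lgfortran -lm"; current Autotools emit these
      flags correctly, so the rewrite is no longer needed.)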
+ + Fixes GitHub issue #3843 + - Fixed an issue where the h5tools_test_utils test program was being installed on the system for Autotools builds of HDF5 From 28d2b6771f41396f1e243e00cb9dd57c4c891613 Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Mon, 13 Nov 2023 13:49:38 -0600 Subject: [PATCH 097/101] HDF5 API test updates (#3835) * HDF5 API test updates Removed test duplication from bringing API tests back into the library from external VOL tests repo Synced changes between API tests and library's tests Updated API tests CMake code to directly use and install testhdf5, testphdf5, etc. instead of creating duplicate binaries Added new h5_using_native_vol() test function to determine whether the VOL connector being used is (or the VOL connector stack being used resolves to) the native VOL connector * Remove duplicate variable --- test/API/CMakeLists.txt | 500 +- test/API/H5_api_dataset_test.c | 8 +- test/API/tarray.c | 2247 --- test/API/tattr.c | 11923 -------------- test/API/tchecksum.c | 246 - test/API/tconfig.c | 181 - test/API/tcoords.c | 718 - test/API/testhdf5.c | 716 - test/API/testhdf5.h | 351 - test/API/tfile.c | 8369 ---------- test/API/tgenprop.c | 2195 --- test/API/th5o.c | 1886 --- test/API/th5s.c | 3542 ---- test/API/tid.c | 1413 -- test/API/titerate.c | 1260 -- test/API/tmisc.c | 6343 -------- test/API/trefer.c | 3636 ----- test/API/tselect.c | 16308 ------------------- test/API/ttime.c | 225 - test/API/tunicode.c | 867 - test/API/tvlstr.c | 1004 -- test/API/tvltypes.c | 3258 ---- test/h5test.c | 82 +- test/h5test.h | 1 + test/tarray.c | 24 +- test/tattr.c | 2708 +-- test/testhdf5.c | 9 + test/tfile.c | 1203 +- test/th5o.c | 47 +- test/th5s.c | 44 +- test/titerate.c | 90 +- test/tmisc.c | 482 +- test/trefer.c | 240 +- test/trefer_deprec.c | 8 + test/tsohm.c | 8 + test/tunicode.c | 81 +- test/tvlstr.c | 5 + testpar/API/CMakeLists.txt | 528 +- testpar/API/H5_api_dataset_test_parallel.c | 3 +- testpar/API/t_bigio.c | 1938 --- testpar/API/t_chunk_alloc.c | 507 - testpar/API/t_coll_chunk.c | 1345 -- testpar/API/t_coll_md_read.c | 624 - testpar/API/t_dset.c | 4317 ----- testpar/API/t_file.c | 1044 -- testpar/API/t_file_image.c | 385 - testpar/API/t_filter_read.c | 532 - testpar/API/t_mdset.c | 2827 ---- testpar/API/t_ph5basic.c | 188 - testpar/API/t_prop.c | 646 - testpar/API/t_pshutdown.c | 147 - testpar/API/t_shapesame.c | 4484 ----- testpar/API/t_span_tree.c | 2588 --- testpar/API/testphdf5.c | 1006 -- testpar/API/testphdf5.h | 342 - testpar/t_bigio.c | 46 +- testpar/t_chunk_alloc.c | 108 +- testpar/t_coll_chunk.c | 166 +- testpar/t_coll_md.c | 52 + testpar/t_dset.c | 204 +- testpar/t_file.c | 40 +- testpar/t_file_image.c | 14 + testpar/t_filter_read.c | 59 +- testpar/t_mdset.c | 210 +- testpar/t_prop.c | 1 + testpar/t_pshutdown.c | 19 + testpar/t_shapesame.c | 132 +- testpar/t_span_tree.c | 127 +- testpar/testphdf5.c | 14 +- 69 files changed, 4631 insertions(+), 92240 deletions(-) delete mode 100644 test/API/tarray.c delete mode 100644 test/API/tattr.c delete mode 100644 test/API/tchecksum.c delete mode 100644 test/API/tconfig.c delete mode 100644 test/API/tcoords.c delete mode 100644 test/API/testhdf5.c delete mode 100644 test/API/testhdf5.h delete mode 100644 test/API/tfile.c delete mode 100644 test/API/tgenprop.c delete mode 100644 test/API/th5o.c delete mode 100644 test/API/th5s.c delete mode 100644 test/API/tid.c delete mode 100644 test/API/titerate.c delete mode 100644 test/API/tmisc.c delete mode 100644 test/API/trefer.c delete mode 100644 test/API/tselect.c delete 
mode 100644 test/API/ttime.c delete mode 100644 test/API/tunicode.c delete mode 100644 test/API/tvlstr.c delete mode 100644 test/API/tvltypes.c delete mode 100644 testpar/API/t_bigio.c delete mode 100644 testpar/API/t_chunk_alloc.c delete mode 100644 testpar/API/t_coll_chunk.c delete mode 100644 testpar/API/t_coll_md_read.c delete mode 100644 testpar/API/t_dset.c delete mode 100644 testpar/API/t_file.c delete mode 100644 testpar/API/t_file_image.c delete mode 100644 testpar/API/t_filter_read.c delete mode 100644 testpar/API/t_mdset.c delete mode 100644 testpar/API/t_ph5basic.c delete mode 100644 testpar/API/t_prop.c delete mode 100644 testpar/API/t_pshutdown.c delete mode 100644 testpar/API/t_shapesame.c delete mode 100644 testpar/API/t_span_tree.c delete mode 100644 testpar/API/testphdf5.c delete mode 100644 testpar/API/testphdf5.h diff --git a/test/API/CMakeLists.txt b/test/API/CMakeLists.txt index e90a4c8985e..6f6af47c305 100644 --- a/test/API/CMakeLists.txt +++ b/test/API/CMakeLists.txt @@ -9,7 +9,6 @@ # help@hdfgroup.org. # - cmake_minimum_required (VERSION 3.18) project (HDF5_TEST_API C) @@ -26,16 +25,15 @@ configure_file( ) #------------------------------------------------------------------------------ -# Compile kwsys library and setup TestDriver +# Compile kwsys library and setup TestDriver if requested #------------------------------------------------------------------------------ if (HDF5_TEST_API_ENABLE_DRIVER) add_subdirectory (driver) endif () #------------------------------------------------------------------------------ -# Define for API tests +# Variables, definitions, etc. for API tests #------------------------------------------------------------------------------ - set (HDF5_API_TESTS attribute dataset @@ -54,22 +52,22 @@ if (HDF5_TEST_API_ENABLE_ASYNC) ) endif () -# Ported HDF5 tests +# Extra HDF5 tests to run. Each entry in the list +# must be a CMake target name for a test executable +# that was added elsewhere in the project set (HDF5_API_TESTS_EXTRA testhdf5 ) # List of files generated by the HDF5 API tests which -# should be cleaned up in case the test failed to remove -# them +# we should attempt to clean up in case the tests failed +# to remove them +# TODO: Run h5delete tool with appropriate env. 
vars for +# connectors to remove these files set (HDF5_API_TESTS_FILES + # Test file used by main tests H5_api_test.h5 - H5_api_async_test.h5 - H5_api_async_test_0.h5 - H5_api_async_test_1.h5 - H5_api_async_test_2.h5 - H5_api_async_test_3.h5 - H5_api_async_test_4.h5 + # 'file' tests test_file.h5 invalid_params_file.h5 excl_flag_file.h5 @@ -90,6 +88,7 @@ set (HDF5_API_TESTS_FILES file_size.h5 file_info.h5 double_group_open.h5 + # 'link' tests ext_link_file.h5 ext_link_file_2.h5 ext_link_file_3.h5 @@ -97,7 +96,15 @@ set (HDF5_API_TESTS_FILES ext_link_file_ping_pong_1.h5 ext_link_file_ping_pong_2.h5 ext_link_invalid_params_file.h5 + # 'object' tests object_copy_test_file.h5 + # 'async' tests + H5_api_async_test.h5 + H5_api_async_test_0.h5 + H5_api_async_test_1.h5 + H5_api_async_test_2.h5 + H5_api_async_test_3.h5 + H5_api_async_test_4.h5 ) #----------------------------------------------------------------------------- @@ -136,19 +143,20 @@ target_compile_definitions ( PRIVATE "$<$:${HDF5_DEVELOPER_DEFS}>" ) -if (NOT BUILD_SHARED_LIBS) - TARGET_C_PROPERTIES (h5_api_test STATIC) +# Always prefer linking the shared HDF5 library by default +if (BUILD_SHARED_LIBS) + TARGET_C_PROPERTIES (h5_api_test SHARED) target_link_libraries ( h5_api_test PRIVATE - ${HDF5_TEST_LIB_TARGET} + ${HDF5_TEST_LIBSH_TARGET} ) else () - TARGET_C_PROPERTIES (h5_api_test SHARED) + TARGET_C_PROPERTIES (h5_api_test STATIC) target_link_libraries ( h5_api_test PRIVATE - ${HDF5_TEST_LIBSH_TARGET} + ${HDF5_TEST_LIB_TARGET} ) endif () set_target_properties ( @@ -161,109 +169,6 @@ if (HDF5_ENABLE_FORMATTERS) clang_format (HDF5_TEST_h5_api_test_FORMAT h5_api_test) endif () -if (HDF5_TEST_API_INSTALL) - install ( - TARGETS - h5_api_test - EXPORT - ${HDF5_EXPORTED_TARGETS} - DESTINATION - ${HDF5_INSTALL_BIN_DIR} - PERMISSIONS - OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE - COMPONENT - tests - ) -endif () - -#----------------------------------------------------------------------------- -# Build the ported HDF5 test executables -#----------------------------------------------------------------------------- -foreach (api_test_extra ${HDF5_API_TESTS_EXTRA}) - unset (HDF5_API_TEST_EXTRA_SRCS) - - set (HDF5_API_TEST_EXTRA_SRCS - ${HDF5_API_TEST_EXTRA_SRCS} - ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c - ) - - if (${api_test_extra} STREQUAL "testhdf5") - set (HDF5_API_TEST_EXTRA_SRCS - ${HDF5_API_TEST_EXTRA_SRCS} - ${CMAKE_CURRENT_SOURCE_DIR}/tarray.c - ${CMAKE_CURRENT_SOURCE_DIR}/tattr.c - ${CMAKE_CURRENT_SOURCE_DIR}/tchecksum.c - ${CMAKE_CURRENT_SOURCE_DIR}/tconfig.c - ${CMAKE_CURRENT_SOURCE_DIR}/tcoords.c - ${CMAKE_CURRENT_SOURCE_DIR}/tfile.c - ${CMAKE_CURRENT_SOURCE_DIR}/tgenprop.c - ${CMAKE_CURRENT_SOURCE_DIR}/th5o.c - ${CMAKE_CURRENT_SOURCE_DIR}/th5s.c - ${CMAKE_CURRENT_SOURCE_DIR}/tid.c - ${CMAKE_CURRENT_SOURCE_DIR}/titerate.c - ${CMAKE_CURRENT_SOURCE_DIR}/tmisc.c - ${CMAKE_CURRENT_SOURCE_DIR}/trefer.c - ${CMAKE_CURRENT_SOURCE_DIR}/tselect.c - ${CMAKE_CURRENT_SOURCE_DIR}/ttime.c - ${CMAKE_CURRENT_SOURCE_DIR}/tunicode.c - ${CMAKE_CURRENT_SOURCE_DIR}/tvlstr.c - ${CMAKE_CURRENT_SOURCE_DIR}/tvltypes.c - ) - endif () - - add_executable (h5_api_test_${api_test_extra} ${HDF5_API_TEST_EXTRA_SRCS}) - target_include_directories ( - h5_api_test_${api_test_extra} - PRIVATE - "${HDF5_SRC_INCLUDE_DIRS}" - "${HDF5_TEST_SRC_DIR}" - "${HDF5_TEST_API_SRC_DIR}" - "${HDF5_SRC_BINARY_DIR}" - "${HDF5_TEST_BINARY_DIR}" - ) - target_compile_options ( - h5_api_test_${api_test_extra} - PRIVATE - 
"${HDF5_CMAKE_C_FLAGS}" - ) - target_compile_definitions ( - h5_api_test_${api_test_extra} - PRIVATE - "$<$:${HDF5_DEVELOPER_DEFS}>" - ) - if (NOT BUILD_SHARED_LIBS) - TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} STATIC) - target_link_libraries (h5_api_test_${api_test_extra} PRIVATE ${HDF5_TEST_LIB_TARGET}) - else () - TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} SHARED) - target_link_libraries (h5_api_test_${api_test_extra} PRIVATE ${HDF5_TEST_LIBSH_TARGET}) - endif () - set_target_properties ( - h5_api_test_${api_test_extra} - PROPERTIES - FOLDER test/API - ) - # Add Target to clang-format - if (HDF5_ENABLE_FORMATTERS) - clang_format (HDF5_TEST_h5_api_test_${api_test_extra}_FORMAT h5_api_test_${api_test_extra}) - endif () - - if (HDF5_TEST_API_INSTALL) - install ( - TARGETS - h5_api_test_${api_test_extra} - EXPORT - ${HDF5_EXPORTED_TARGETS} - DESTINATION - ${HDF5_INSTALL_BIN_DIR} - PERMISSIONS - OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE - COMPONENT - tests - ) - endif () -endforeach () - #----------------------------------------------------------------------------- # Add tests if HDF5 serial testing is enabled #----------------------------------------------------------------------------- @@ -293,6 +198,7 @@ if (HDF5_TEST_SERIAL) ) endif () + # Add main API tests to test suite set (last_api_test "") foreach (api_test ${HDF5_API_TESTS}) add_test ( @@ -309,17 +215,6 @@ if (HDF5_TEST_SERIAL) set (last_api_test "h5_api_test_${api_test}") endforeach () - foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) - add_test ( - NAME "h5_api_test_${hdf5_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - endforeach () - # Hook external tests to same test suite foreach (ext_api_test ${HDF5_API_EXT_SERIAL_TESTS}) add_test ( @@ -332,93 +227,99 @@ if (HDF5_TEST_SERIAL) ) endforeach () - # Add tests for each external VOL connector that was built - foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) - # Determine environment variables that need to be set for testing - set (vol_test_env "") - set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - - get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) - list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") - - get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) - foreach (lib_target ${vol_lib_targets}) - get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) - if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" - AND NOT "${lib_target_output_dir}" STREQUAL "" - AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") - endif () - endforeach () - - list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") - - # Add main API tests - set (last_api_test "") - foreach (api_test ${HDF5_API_TESTS}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_${api_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ "${api_test}" - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_${api_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - DEPENDS - "${last_api_test}" - ) - - set (last_api_test "${external_vol_tgt}-h5_api_test_${api_test}") - endforeach () - - # 
Add any extra HDF5 tests - foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_${hdf5_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_${hdf5_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - ) + if (BUILD_SHARED_LIBS) + # Add tests for each external VOL connector that was built, + # but only if executables that were linked to a shared HDF5 + # library are available, since static executables will cause + # issues when VOL connectors are loaded dynamically + foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) + # Determine environment variables that need to be set for testing + set (vol_test_env "") + set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + + get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) + list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") + + get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) + foreach (lib_target ${vol_lib_targets}) + get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) + if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" + AND NOT "${lib_target_output_dir}" STREQUAL "" + AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") + endif () + endforeach () + + list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") + + # Add main API tests to test suite + set (last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_${api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ "${api_test}" + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_${api_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + DEPENDS + "${last_api_test}" + ) + + set (last_api_test "${external_vol_tgt}-h5_api_test_${api_test}") + endforeach () + + # Add any extra HDF5 tests to test suite + foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_${hdf5_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_${hdf5_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + ) + endforeach () + + # Hook external tests to same test suite + foreach (ext_api_test ${HDF5_API_EXT_SERIAL_TESTS}) + add_test ( + NAME "${external_vol_tgt}-h5_api_ext_test_${ext_api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_ext_test_${ext_api_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + ) + endforeach () endforeach () - - # Hook external tests to same test suite - foreach (ext_api_test ${HDF5_API_EXT_SERIAL_TESTS}) - add_test ( - NAME "${external_vol_tgt}-h5_api_ext_test_${ext_api_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ - --serial - 
${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_ext_test_${ext_api_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - ) - endforeach () - endforeach () + endif () else () + # Add main API tests to test suite set (last_api_test "") foreach (api_test ${HDF5_API_TESTS}) add_test ( @@ -431,70 +332,117 @@ if (HDF5_TEST_SERIAL) set (last_api_test "h5_api_test_${api_test}") endforeach () - foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) + # Hook external tests to same test suite + foreach (ext_api_test ${HDF5_API_EXT_SERIAL_TESTS}) add_test ( - NAME "h5_api_test_${hdf5_test}" - COMMAND $ + NAME "h5_api_ext_test_${ext_api_test}" + COMMAND $ ) endforeach () - # Add tests for each external VOL connector that was built - foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) - # Determine environment variables that need to be set for testing - set (vol_test_env "") - set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - - get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) - list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") - - get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) - foreach (lib_target ${vol_lib_targets}) - get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) - if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" - AND NOT "${lib_target_output_dir}" STREQUAL "" - AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") - endif () + if (BUILD_SHARED_LIBS) + # Add tests for each external VOL connector that was built, + # but only if executables that were linked to a shared HDF5 + # library are available, since static executables will cause + # issues when VOL connectors are loaded dynamically + foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) + # Determine environment variables that need to be set for testing + set (vol_test_env "") + set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + + get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) + list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") + + get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) + foreach (lib_target ${vol_lib_targets}) + get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) + if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" + AND NOT "${lib_target_output_dir}" STREQUAL "" + AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") + endif () + endforeach () + + list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") + + # Add main API tests to test suite + set (last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_${api_test}" + COMMAND $ "${api_test}" + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_${api_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + DEPENDS + "${last_api_test}" + ) + + set (last_api_test "${external_vol_tgt}-h5_api_test_${api_test}") + endforeach () + + # Add any extra HDF5 tests to test suite + foreach (hdf5_test 
${HDF5_API_TESTS_EXTRA}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_${hdf5_test}" + COMMAND $ + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_${hdf5_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + ) + endforeach () endforeach () + endif() + endif () +endif () - list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") - - # Add main API tests - set (last_api_test "") - foreach (api_test ${HDF5_API_TESTS}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_${api_test}" - COMMAND $ "${api_test}" - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_${api_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - DEPENDS - "${last_api_test}" - ) - - set (last_api_test "${external_vol_tgt}-h5_api_test_${api_test}") - endforeach () +#----------------------------------------------------------------------------- +# Install the main API test executable and any +# extra HDF5 tests if requested +#----------------------------------------------------------------------------- +if (HDF5_EXPORTED_TARGETS AND HDF5_TEST_API_INSTALL) + install ( + TARGETS + h5_api_test + EXPORT + ${HDF5_EXPORTED_TARGETS} + DESTINATION + ${HDF5_INSTALL_BIN_DIR} + PERMISSIONS + OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + COMPONENT + tests + ) - # Add any extra HDF5 tests - foreach (hdf5_test ${HDF5_API_TESTS_EXTRA}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_${hdf5_test}" - COMMAND $ - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_${hdf5_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - ) - endforeach () - endforeach () - endif () + foreach (api_test_extra ${HDF5_API_TESTS_EXTRA}) + if (TARGET ${api_test_extra}) + set_target_properties ( + ${api_test_extra} + PROPERTIES + OUTPUT_NAME "h5_api_test_${api_test_extra}" + ) + install ( + TARGETS + ${api_test_extra} + EXPORT + ${HDF5_EXPORTED_TARGETS} + DESTINATION + ${HDF5_INSTALL_BIN_DIR} + PERMISSIONS + OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + COMPONENT + tests + ) + endif () + endforeach () endif () diff --git a/test/API/H5_api_dataset_test.c b/test/API/H5_api_dataset_test.c index 996e2cce4aa..d36b77b969a 100644 --- a/test/API/H5_api_dataset_test.c +++ b/test/API/H5_api_dataset_test.c @@ -10817,8 +10817,8 @@ test_read_partial_chunk_hyperslab_selection(void) * then read correctly when the selection used in a chunked * dataset's file dataspace is a point selection. */ -#define FIXED_DIMSIZE 25 -#define FIXED_CHUNK_DIMSIZE 10 +/* #define FIXED_DIMSIZE 25 */ +/* #define FIXED_CHUNK_DIMSIZE 10 */ static int test_read_partial_chunk_point_selection(void) { @@ -10827,8 +10827,8 @@ test_read_partial_chunk_point_selection(void) return 1; } -#undef FIXED_DIMSIZE -#undef FIXED_CHUNK_DIMSIZE +/* #undef FIXED_DIMSIZE */ +/* #undef FIXED_CHUNK_DIMSIZE */ /* * A test to verify that H5Dvlen_get_buf_size returns diff --git a/test/API/tarray.c b/test/API/tarray.c deleted file mode 100644 index 7ab21146f81..00000000000 --- a/test/API/tarray.c +++ /dev/null @@ -1,2247 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. 
The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tarray - * - * Test the Array Datatype functionality - * - *************************************************************/ - -#include "testhdf5.h" -/* #include "H5srcdir.h" */ - -#define FILENAME "tarray1.h5" -#define TESTFILE "tarrold.h5" - -/* 1-D array datatype */ -#define ARRAY1_RANK 1 -#define ARRAY1_DIM1 4 - -/* 3-D array datatype */ -#define ARRAY2_RANK 3 -#define ARRAY2_DIM1 3 -#define ARRAY2_DIM2 4 -#define ARRAY2_DIM3 5 - -/* 2-D array datatype */ -#define ARRAY3_RANK 2 -#define ARRAY3_DIM1 6 -#define ARRAY3_DIM2 3 - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -/* Parameters used with the test_array_bkg() test */ -#define FIELDNAME "ArrayofStructures" -#define LENGTH 5 -#define ALEN 10 -#define RANK 1 -#define NMAX 100 - -/* Struct used with test_array_bkg() test */ -typedef struct { - int nsubfields; - char *name[NMAX]; - size_t offset[NMAX]; - hid_t datatype[NMAX]; - -} CmpDTSinfo; - -/* Forward declarations for custom vlen memory manager functions */ -void *test_array_alloc_custom(size_t size, void *info); -void test_array_free_custom(void *mem, void *info); - -/*------------------------------------------------------------------------- - * Function: test_array_atomic_1d - * - * Purpose: Test basic array datatype code. - * Tests 1-D array of atomic datatypes. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_atomic_1d(void) -{ - int wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ - int rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - int i, j; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Array of Atomic Datatypes Functionality\n")); - - /* Allocate and initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) - wdata[i][j] = i * 10 + j; - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid1 = H5Tarray_create2(H5T_NATIVE_INT, ARRAY1_RANK, tdims1); - CHECK(tid1, FAIL, "H5Tarray_create2"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid1); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid1, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) - if (wdata[i][j] != rdata[i][j]) { - TestErrPrintf("Array data information doesn't match!, wdata[%d][%d]=%d, rdata[%d][%d]=%d\n", - (int)i, (int)j, (int)wdata[i][j], (int)i, (int)j, (int)rdata[i][j]); - continue; - } /* end if */ - - /* Close Datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* end 
test_array_atomic_1d() */ - -/*------------------------------------------------------------------------- - * Function: test_array_funcs - * - * Purpose: Test some type functions that are and aren't supposed to - * work with array type. - * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_funcs(void) -{ - hid_t type; /* Datatype ID */ - hsize_t tdims1[] = {ARRAY1_DIM1}; - size_t size; - H5T_pad_t inpad; - H5T_norm_t norm; - H5T_cset_t cset; - H5T_str_t strpad; - herr_t ret; /* Generic return value */ - - /* Create a datatype to refer to */ - type = H5Tarray_create2(H5T_IEEE_F32BE, ARRAY1_RANK, tdims1); - CHECK(type, FAIL, "H5Tarray_create2"); - - size = H5Tget_precision(type); - CHECK(size, 0, "H5Tget_precision"); - - size = H5Tget_size(type); - CHECK(size, 0, "H5Tget_size"); - - size = H5Tget_ebias(type); - CHECK(size, 0, "H5Tget_ebias"); - - ret = H5Tset_pad(type, H5T_PAD_ZERO, H5T_PAD_ONE); - CHECK(ret, FAIL, "H5Tset_pad"); - - inpad = H5Tget_inpad(type); - CHECK(inpad, FAIL, "H5Tget_inpad"); - - norm = H5Tget_norm(type); - CHECK(norm, FAIL, "H5Tget_norm"); - - ret = H5Tset_offset(type, (size_t)16); - CHECK(ret, FAIL, "H5Tset_offset"); - - H5E_BEGIN_TRY - { - cset = H5Tget_cset(type); - } - H5E_END_TRY - VERIFY(cset, FAIL, "H5Tget_cset"); - - H5E_BEGIN_TRY - { - strpad = H5Tget_strpad(type); - } - H5E_END_TRY - VERIFY(strpad, FAIL, "H5Tget_strpad"); - - /* Close datatype */ - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); -} /* end test_array_funcs() */ - -/*------------------------------------------------------------------------- - * Function: test_array_atomic_3d - * - * Purpose: Test basic array datatype code. - * Tests 3-D array of atomic datatypes. - * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_atomic_3d(void) -{ - int wdata[SPACE1_DIM1][ARRAY2_DIM1][ARRAY2_DIM2][ARRAY2_DIM3]; /* Information to write */ - int rdata[SPACE1_DIM1][ARRAY2_DIM1][ARRAY2_DIM2][ARRAY2_DIM3]; /* Information read in */ - hid_t fid; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims2[] = {ARRAY2_DIM1, ARRAY2_DIM2, ARRAY2_DIM3}; - int ndims; /* Array rank for reading */ - hsize_t rdims2[H5S_MAX_RANK]; /* Array dimensions for reading */ - int i, j, k, l; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 3-D Array of Atomic Datatypes Functionality\n")); - - /* Allocate and initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY2_DIM1; j++) - for (k = 0; k < ARRAY2_DIM2; k++) - for (l = 0; l < ARRAY2_DIM3; l++) - wdata[i][j][k][l] = i * 1000 + j * 100 + k * 10 + l; - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid = H5Tarray_create2(H5T_NATIVE_INT, ARRAY2_RANK, tdims2); - CHECK(tid, FAIL, "H5Tarray_create2"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, "Dataset1", tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, 
wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the datatype */ - tid = H5Dget_type(dataset); - CHECK(tid, FAIL, "H5Dget_type"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid); - VERIFY(ndims, ARRAY2_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid, rdims2); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims2[i] != tdims2[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims2[%d]=%d, tdims2[%d]=%d\n", - (int)i, (int)rdims2[i], (int)i, (int)tdims2[i]); - continue; - } /* end if */ - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY2_DIM1; j++) - for (k = 0; k < ARRAY2_DIM2; k++) - for (l = 0; l < ARRAY2_DIM3; l++) - if (wdata[i][j][k][l] != rdata[i][j][k][l]) { - TestErrPrintf("Array data information doesn't match!, wdata[%d][%d][%d][%d]=%d, " - "rdata[%d][%d][%d][%d]=%d\n", - (int)i, (int)j, (int)k, (int)l, (int)wdata[i][j][k][l], (int)i, (int)j, - (int)k, (int)l, (int)rdata[i][j][k][l]); - continue; - } /* end if */ - - /* Close Datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_array_atomic_3d() */ - -/*------------------------------------------------------------------------- - * Function: test_array_array_atomic - * - * Purpose: Test basic array datatype code. - * Tests 1-D array 2-D arrays of atomic datatypes. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_array_atomic(void) -{ - int wdata[SPACE1_DIM1][ARRAY1_DIM1][ARRAY3_DIM1][ARRAY3_DIM2]; /* Information to write */ - int rdata[SPACE1_DIM1][ARRAY1_DIM1][ARRAY3_DIM1][ARRAY3_DIM2]; /* Information read in */ - hid_t fid; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid1; /* 1-D array Datatype ID */ - hid_t tid2; /* 2-D array Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims1[] = {ARRAY1_DIM1}; - hsize_t tdims2[] = {ARRAY3_DIM1, ARRAY3_DIM2}; - int ndims1; /* Array rank for reading */ - int ndims2; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - hsize_t rdims2[H5S_MAX_RANK]; /* Array dimensions for reading */ - int i, j, k, l; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Array 2-D Arrays of Atomic Datatypes Functionality\n")); - - /* Allocate and initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) - for (k = 0; k < ARRAY3_DIM1; k++) - for (l = 0; l < ARRAY3_DIM2; l++) - wdata[i][j][k][l] = i * 1000 + j * 100 + k * 10 + l; - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create a 2-D datatype to refer to */ - tid2 = H5Tarray_create2(H5T_NATIVE_INT, ARRAY3_RANK, tdims2); - CHECK(tid2, FAIL, "H5Tarray_create2"); - - /* Create a 1-D datatype to refer to */ - tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); - CHECK(tid1, FAIL, "H5Tarray_create2"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatypes */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the 1-D datatype */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Check the 1-D array rank */ - ndims1 = H5Tget_array_ndims(tid1); - VERIFY(ndims1, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the 1-D array dimensions */ - ret = H5Tget_array_dims2(tid1, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims1; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Get the 2-D datatype */ - tid2 = H5Tget_super(tid1); - CHECK(tid2, FAIL, "H5Tget_super"); - - /* Check the 2-D array rank */ - ndims2 = 
H5Tget_array_ndims(tid2); - VERIFY(ndims2, ARRAY3_RANK, "H5Tget_array_ndims"); - - /* Get the 2-D array dimensions */ - ret = H5Tget_array_dims2(tid2, rdims2); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims2; i++) - if (rdims2[i] != tdims2[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims2[%d]=%d, tdims2[%d]=%d\n", - (int)i, (int)rdims2[i], (int)i, (int)tdims2[i]); - continue; - } /* end if */ - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) - for (k = 0; k < ARRAY3_DIM1; k++) - for (l = 0; l < ARRAY3_DIM2; l++) - if (wdata[i][j][k][l] != rdata[i][j][k][l]) { - TestErrPrintf("Array data information doesn't match!, wdata[%d][%d][%d][%d]=%d, " - "rdata[%d][%d][%d][%d]=%d\n", - (int)i, (int)j, (int)k, (int)l, (int)wdata[i][j][k][l], (int)i, (int)j, - (int)k, (int)l, (int)rdata[i][j][k][l]); - continue; - } /* end if */ - - /* Close Datatypes */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_array_array_atomic() */ - -/*------------------------------------------------------------------------- - * Function: test_array_compound_atomic - * - * Purpose: Test basic array datatype code. - * Tests 1-D array of compound datatypes (with no array fields). - * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_compound_atomic(void) -{ - typedef struct { /* Typedef for compound datatype */ - int i; - float f; - } s1_t; - - s1_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ - s1_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Array Datatype ID */ - hid_t tid2; /* Compound Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - int nmemb; /* Number of compound members */ - char *mname; /* Name of compound field */ - size_t off; /* Offset of compound field */ - hid_t mtid; /* Datatype ID for field */ - int i, j; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Array of Compound Atomic Datatypes Functionality\n")); - - /* Initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) { - wdata[i][j].i = i * 10 + j; - wdata[i][j].f = (float)i * 2.5F + (float)j; - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a compound datatype to refer to */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert integer field */ - ret = H5Tinsert(tid2, "i", HOFFSET(s1_t, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Insert float field */ - 
ret = H5Tinsert(tid2, "f", HOFFSET(s1_t, f), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create an array datatype to refer to */ - tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); - CHECK(tid1, FAIL, "H5Tarray_create2"); - - /* Close compound datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid1); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid1, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Get the compound datatype */ - tid2 = H5Tget_super(tid1); - CHECK(tid2, FAIL, "H5Tget_super"); - - /* Check the number of members */ - nmemb = H5Tget_nmembers(tid2); - VERIFY(nmemb, 2, "H5Tget_nmembers"); - - /* Check the 1st field's name */ - mname = H5Tget_member_name(tid2, 0); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "i") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 1st field's offset */ - off = H5Tget_member_offset(tid2, 0); - VERIFY(off, HOFFSET(s1_t, i), "H5Tget_member_offset"); - - /* Check the 1st field's datatype */ - mtid = H5Tget_member_type(tid2, 0); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_NATIVE_INT)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Check the 2nd field's name */ - mname = H5Tget_member_name(tid2, 1); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "f") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 2nd field's offset */ - off = H5Tget_member_offset(tid2, 1); - VERIFY(off, HOFFSET(s1_t, f), "H5Tget_member_offset"); - - /* Check the 2nd field's datatype */ - mtid = H5Tget_member_type(tid2, 1); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_NATIVE_FLOAT)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Close Compound Datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, 
H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) { - if (wdata[i][j].i != rdata[i][j].i) { - TestErrPrintf( - "Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n", (int)i, - (int)j, (int)wdata[i][j].i, (int)i, (int)j, (int)rdata[i][j].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(wdata[i][j].f, rdata[i][j].f)) { - TestErrPrintf( - "Array data information doesn't match!, wdata[%d][%d].f=%f, rdata[%d][%d].f=%f\n", (int)i, - (int)j, (double)wdata[i][j].f, (int)i, (int)j, (double)rdata[i][j].f); - continue; - } /* end if */ - } /* end for */ - - /* Close Datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_array_compound_atomic() */ - -/*------------------------------------------------------------------------- - * Function: test_array_compound_array - * - * Purpose: Test basic array datatype code. - * Tests 1-D array of compound datatypes (with array fields). - * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_compound_array(void) -{ - typedef struct { /* Typedef for compound datatype */ - int i; - float f[ARRAY1_DIM1]; - } s1_t; - - s1_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ - s1_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Array Datatype ID */ - hid_t tid2; /* Compound Datatype ID */ - hid_t tid3; /* Nested Array Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - int nmemb; /* Number of compound members */ - char *mname; /* Name of compound field */ - size_t off; /* Offset of compound field */ - hid_t mtid; /* Datatype ID for field */ - H5T_class_t mclass; /* Datatype class for field */ - int i, j, k; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Array of Compound Array Datatypes Functionality\n")); - - /* Initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) { - wdata[i][j].i = i * 10 + j; - for (k = 0; k < ARRAY1_DIM1; k++) - wdata[i][j].f[k] = (float)i * 10.0F + (float)j * 2.5F + (float)k; - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a compound datatype to refer to */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert integer field */ - ret = H5Tinsert(tid2, "i", HOFFSET(s1_t, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create an array of floats datatype */ - tid3 = H5Tarray_create2(H5T_NATIVE_FLOAT, ARRAY1_RANK, tdims1); - CHECK(tid3, FAIL, "H5Tarray_create2"); - - /* Insert float array field */ - ret = H5Tinsert(tid2, "f", HOFFSET(s1_t, f), tid3); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Close array of floats field datatype */ - 
ret = H5Tclose(tid3); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create an array datatype to refer to */ - tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); - CHECK(tid1, FAIL, "H5Tarray_create2"); - - /* Close compound datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid1); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid1, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Get the compound datatype */ - tid2 = H5Tget_super(tid1); - CHECK(tid2, FAIL, "H5Tget_super"); - - /* Check the number of members */ - nmemb = H5Tget_nmembers(tid2); - VERIFY(nmemb, 2, "H5Tget_nmembers"); - - /* Check the 1st field's name */ - mname = H5Tget_member_name(tid2, 0); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "i") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 1st field's offset */ - off = H5Tget_member_offset(tid2, 0); - VERIFY(off, HOFFSET(s1_t, i), "H5Tget_member_offset"); - - /* Check the 1st field's datatype */ - mtid = H5Tget_member_type(tid2, 0); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_NATIVE_INT)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Check the 2nd field's name */ - mname = H5Tget_member_name(tid2, 1); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "f") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 2nd field's offset */ - off = H5Tget_member_offset(tid2, 1); - VERIFY(off, HOFFSET(s1_t, f), "H5Tget_member_offset"); - - /* Check the 2nd field's datatype */ - mtid = H5Tget_member_type(tid2, 1); - CHECK(mtid, FAIL, "H5Tget_member_type"); - - /* Get the 2nd field's class */ - mclass = H5Tget_class(mtid); - VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(mtid); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(mtid, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < 
ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Nested array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Check the nested array's datatype */ - tid3 = H5Tget_super(mtid); - CHECK(tid3, FAIL, "H5Tget_super"); - - if ((ret = H5Tequal(tid3, H5T_NATIVE_FLOAT)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - - /* Close the array's base type datatype */ - ret = H5Tclose(tid3); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Close the member datatype */ - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Close Compound Datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - for (j = 0; j < ARRAY1_DIM1; j++) { - if (wdata[i][j].i != rdata[i][j].i) { - TestErrPrintf( - "Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n", (int)i, - (int)j, (int)wdata[i][j].i, (int)i, (int)j, (int)rdata[i][j].i); - continue; - } /* end if */ - for (k = 0; k < ARRAY1_DIM1; k++) - if (!H5_FLT_ABS_EQUAL(wdata[i][j].f[k], rdata[i][j].f[k])) { - TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f[%d]=%f, " - "rdata[%d][%d].f[%d]=%f\n", - (int)i, (int)j, (int)k, (double)wdata[i][j].f[k], (int)i, (int)j, (int)k, - (double)rdata[i][j].f[k]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Close Datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_array_compound_array() */ - -/**************************************************************** -** -** test_array_alloc_custom(): Test VL datatype custom memory -** allocation routines. This routine just uses malloc to -** allocate the memory and increments the amount of memory -** allocated. -** -****************************************************************/ - -/*------------------------------------------------------------------------- - * Function: test_array_alloc_custom - * - * Purpose: Memory allocator for testing VL datatype custom memory - * allocation routines. - * - * This routine just uses malloc to allocate the memory and - * increments the amount of memory allocated. - * - * Return: - * - * Success: A memory buffer - * Failure: NULL - * - *------------------------------------------------------------------------- - */ -void * -test_array_alloc_custom(size_t size, void *info) -{ - void *ret_value = NULL; /* Pointer to return */ - size_t *mem_used = (size_t *)info; /* Pointer to the memory used */ - size_t extra; /* Extra space needed */ - - /* - * This weird contortion is required on the DEC Alpha to keep the - * alignment correct - QAK - */ - extra = MAX(sizeof(void *), sizeof(size_t)); - - if ((ret_value = malloc(extra + size)) != NULL) { - *(size_t *)ret_value = size; - *mem_used += size; - } /* end if */ - - ret_value = ((unsigned char *)ret_value) + extra; - return ret_value; -} /* end test_array_alloc_custom() */ - -/*------------------------------------------------------------------------- - * Function: test_array_free_custom - * - * Purpose: Memory free function for testing VL datatype custom memory - * allocation routines. 
- * - * This routine just uses free to free the memory and - * decrements the amount of memory allocated. - * - * Return: void - * - *------------------------------------------------------------------------- - */ -void -test_array_free_custom(void *_mem, void *info) -{ - unsigned char *mem = NULL; /* Pointer to mem to be freed */ - size_t *mem_used = (size_t *)info; /* Pointer to the memory used */ - size_t extra; /* Extra space needed */ - - /* - * This weird contortion is required on the DEC Alpha to keep the - * alignment correct - QAK - */ - extra = MAX(sizeof(void *), sizeof(size_t)); - - if (_mem != NULL) { - mem = ((unsigned char *)_mem) - extra; - *mem_used -= *(size_t *)((void *)mem); - free(mem); - } /* end if */ - -} /* end test_array_free_custom() */ - -/*------------------------------------------------------------------------- - * Function: test_array_vlen_atomic - * - * Purpose: Test basic array datatype code. - * Tests 1-D array of atomic VL datatypes. - * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_vlen_atomic(void) -{ - hvl_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Array Datatype ID */ - hid_t tid2; /* VL Datatype ID */ - hid_t tid3; /* Atomic Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - H5T_class_t mclass; /* Datatype class for VL */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - size_t mem_used = 0; /* Memory used during allocation */ - int i, j, k; /* counting variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Array of Atomic Variable-Length Datatypes Functionality\n")); - - /* Initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) { - wdata[i][j].p = malloc((size_t)(i + j + 1) * sizeof(unsigned int)); - wdata[i][j].len = (size_t)(i + j + 1); - for (k = 0; k < (i + j + 1); k++) - ((unsigned int *)wdata[i][j].p)[k] = (unsigned int)(i * 100 + j * 10 + k); - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a compound datatype to refer to */ - tid2 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Create an array datatype to refer to */ - tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); - CHECK(tid1, FAIL, "H5Tarray_create2"); - - /* Close VL datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk 
dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the dataspace */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid1); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid1, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Get the VL datatype */ - tid2 = H5Tget_super(tid1); - CHECK(tid2, FAIL, "H5Tget_super"); - - /* Get the 2nd field's class */ - mclass = H5Tget_class(tid2); - VERIFY(mclass, H5T_VLEN, "H5Tget_class"); - - /* Check the VL datatype's base type */ - tid3 = H5Tget_super(tid2); - CHECK(tid3, FAIL, "H5Tget_super"); - - if ((ret = H5Tequal(tid3, H5T_NATIVE_UINT)) <= 0) - TestErrPrintf("VL base datatype is incorrect!, ret=%d\n", (int)ret); - - /* Close the array's base type datatype */ - ret = H5Tclose(tid3); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close VL Datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_array_alloc_custom, &mem_used, test_array_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + - * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64 elements - */ - VERIFY(size, 64 * sizeof(unsigned int), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + - * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64 elements - */ - VERIFY(mem_used, 64 * sizeof(unsigned int), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - for (j = 0; j < ARRAY1_DIM1; j++) { - if (wdata[i][j].len != rdata[i][j].len) { - TestErrPrintf("VL data length don't match!, wdata[%d][%d].len=%d, rdata[%d][%d].len=%d\n", - (int)i, (int)j, (int)wdata[i][j].len, (int)i, (int)j, (int)rdata[i][j].len); - continue; - } /* end if */ - for (k = 0; k < (int)rdata[i][j].len; k++) { - if (((unsigned int *)wdata[i][j].p)[k] != ((unsigned int *)rdata[i][j].p)[k]) { - TestErrPrintf( - "VL data values don't match!, wdata[%d][%d].p[%d]=%d, rdata[%d][%d].p[%d]=%d\n", - (int)i, (int)j, (int)k, (int)((unsigned int *)wdata[i][j].p)[k], (int)i, (int)j, - (int)k, (int)((unsigned int *)rdata[i][j].p)[k]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - } /* 
end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close Datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_array_vlen_atomic() */ - -/*------------------------------------------------------------------------- - * Function: test_array_vlen_array - * - * Purpose: Test basic array datatype code. - * Tests 1-D array of 1-D array VL datatypes. - * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_vlen_array(void) -{ - hvl_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Array Datatype ID */ - hid_t tid2; /* VL Datatype ID */ - hid_t tid3; /* Nested Array Datatype ID */ - hid_t tid4; /* Atomic Datatype ID */ - hsize_t sdims1[] = {SPACE1_DIM1}; - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - H5T_class_t mclass; /* Datatype class for VL */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - size_t mem_used = 0; /* Memory used during allocation */ - int i, j, k, l; /* Index variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Array of 1-D Array Variable-Length Datatypes Functionality\n")); - - /* Initialize array data to write */ - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < ARRAY1_DIM1; j++) { - wdata[i][j].p = malloc((size_t)(i + j + 1) * sizeof(unsigned int) * (size_t)ARRAY1_DIM1); - wdata[i][j].len = (size_t)(i + j + 1); - for (k = 0; k < (i + j + 1); k++) - for (l = 0; l < ARRAY1_DIM1; l++) - ((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l] = - (unsigned int)(i * 1000 + j * 100 + k * 10 + l); - } - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create the nested array datatype to refer to */ - tid3 = H5Tarray_create2(H5T_NATIVE_UINT, ARRAY1_RANK, tdims1); - CHECK(tid3, FAIL, "H5Tarray_create2"); - - /* Create a VL datatype of 1-D arrays to refer to */ - tid2 = H5Tvlen_create(tid3); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Close nested array datatype */ - ret = H5Tclose(tid3); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create an array datatype to refer to */ - tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1); - CHECK(tid1, FAIL, "H5Tarray_create2"); - - /* Close VL datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, 
"H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get the dataspace */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid1); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid1, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Get the VL datatype */ - tid2 = H5Tget_super(tid1); - CHECK(tid2, FAIL, "H5Tget_super"); - - /* Get the VL datatype's class */ - mclass = H5Tget_class(tid2); - VERIFY(mclass, H5T_VLEN, "H5Tget_class"); - - /* Check the VL datatype's base type */ - tid3 = H5Tget_super(tid2); - CHECK(tid3, FAIL, "H5Tget_super"); - - /* Get the nested array datatype's class */ - mclass = H5Tget_class(tid3); - VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(tid3); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(tid3, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Check the array's base type */ - tid4 = H5Tget_super(tid3); - CHECK(tid4, FAIL, "H5Tget_super"); - - if ((ret = H5Tequal(tid4, H5T_NATIVE_UINT)) <= 0) - TestErrPrintf("VL base datatype is incorrect!, ret=%d\n", (int)ret); - - /* Close the array's base type datatype */ - ret = H5Tclose(tid4); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close the nested array datatype */ - ret = H5Tclose(tid3); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close VL Datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_array_alloc_custom, &mem_used, test_array_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + - * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64*ARRAY1_DIM1 elements - */ - VERIFY(size, 64 * (sizeof(unsigned int) 
* ARRAY1_DIM1), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) + - * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64*ARRAY1_DIM1 elements - */ - VERIFY(mem_used, 64 * (sizeof(unsigned int) * ARRAY1_DIM1), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - for (j = 0; j < ARRAY1_DIM1; j++) { - if (wdata[i][j].len != rdata[i][j].len) { - TestErrPrintf("VL data length don't match!, wdata[%d][%d].len=%d, rdata[%d][%d].len=%d\n", - (int)i, (int)j, (int)wdata[i][j].len, (int)i, (int)j, (int)rdata[i][j].len); - continue; - } /* end if */ - for (k = 0; k < (int)rdata[i][j].len; k++) { - for (l = 0; l < ARRAY1_DIM1; l++) { - if (((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l] != - ((unsigned int *)rdata[i][j].p)[k * ARRAY1_DIM1 + l]) { - TestErrPrintf("VL data values don't match!, wdata[%d][%d].p[%d][%d]=%d, " - "rdata[%d][%d].p[%d][%d]=%d\n", - (int)i, (int)j, (int)k, (int)l, - (int)((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l], (int)i, - (int)j, (int)k, (int)l, - (int)((unsigned int *)rdata[i][j].p)[k * ARRAY1_DIM1 + l]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - } /* end for */ - } /* end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close Datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_array_vlen_array() */ - -/*------------------------------------------------------------------------- - * Function: test_array_bkg - * - * Purpose: Test basic array datatype code. - * Tests reading compound datatype with array fields and - * writing partial fields. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ -static void -test_array_bkg(void) -{ - herr_t status = -1; - - hid_t fid, array_dt; - hid_t space; - hid_t type; - hid_t dataset; - - hsize_t dim[] = {LENGTH}; - hsize_t dima[] = {ALEN}; - - int i, j; - unsigned ndims[3] = {1, 1, 1}; - - typedef struct { - int a[ALEN]; - float b[ALEN]; - double c[ALEN]; - } CmpField; - - CmpField cf[LENGTH]; - CmpField cfr[LENGTH]; - CmpDTSinfo *dtsinfo = NULL; - - typedef struct { - float b[ALEN]; - } fld_t; - - fld_t fld[LENGTH]; - fld_t fldr[LENGTH]; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Partial I/O of Array Fields in Compound Datatype Functionality\n")); - - /* Initialize the data */ - /* ------------------- */ - dtsinfo = (CmpDTSinfo *)malloc(sizeof(CmpDTSinfo)); - CHECK_PTR(dtsinfo, "malloc"); - memset(dtsinfo, 0, sizeof(CmpDTSinfo)); - for (i = 0; i < LENGTH; i++) { - for (j = 0; j < ALEN; j++) { - cf[i].a[j] = 100 * (i + 1) + j; - cf[i].b[j] = 100.0F * ((float)i + 1.0F) + 0.01F * (float)j; - cf[i].c[j] = (double)(100.0F * ((float)i + 1.0F) + 0.02F * (float)j); - } /* end for */ - } /* end for */ - - /* Set the number of data members */ - /* ------------------------------ */ - dtsinfo->nsubfields = 3; - - /* Initialize the offsets */ - /* ----------------------- */ - dtsinfo->offset[0] = HOFFSET(CmpField, a); - dtsinfo->offset[1] = HOFFSET(CmpField, b); - dtsinfo->offset[2] = HOFFSET(CmpField, c); - - /* Initialize the data type IDs */ - /* ---------------------------- */ - dtsinfo->datatype[0] = H5T_NATIVE_INT; - dtsinfo->datatype[1] = H5T_NATIVE_FLOAT; - dtsinfo->datatype[2] = H5T_NATIVE_DOUBLE; - - /* Initialize the names of data members */ - /* ------------------------------------ */ - for (i = 0; i < dtsinfo->nsubfields; i++) - dtsinfo->name[i] = (char *)calloc((size_t)20, sizeof(char)); - - strcpy(dtsinfo->name[0], "One"); - strcpy(dtsinfo->name[1], "Two"); - strcpy(dtsinfo->name[2], "Three"); - - /* Create file */ - /* ----------- */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create data space */ - /* ----------------- */ - space = H5Screate_simple(RANK, dim, NULL); - CHECK(space, FAIL, "H5Screate_simple"); - - /* Create the memory data type */ - /* --------------------------- */ - type = H5Tcreate(H5T_COMPOUND, sizeof(CmpField)); - CHECK(type, FAIL, "H5Tcreate"); - - /* Add members to the compound data type */ - /* -------------------------------------- */ - for (i = 0; i < dtsinfo->nsubfields; i++) { - array_dt = H5Tarray_create2(dtsinfo->datatype[i], ndims[i], dima); - CHECK(array_dt, FAIL, "H5Tarray_create2"); - - status = H5Tinsert(type, dtsinfo->name[i], dtsinfo->offset[i], array_dt); - CHECK(status, FAIL, "H5Tinsert"); - - status = H5Tclose(array_dt); - CHECK(status, FAIL, "H5Tclose"); - } /* end for */ - - /* Create the dataset */ - /* ------------------ */ - dataset = H5Dcreate2(fid, FIELDNAME, type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write data to the dataset */ - /* ------------------------- */ - status = H5Dwrite(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cf); - CHECK(status, FAIL, "H5Dwrite"); - - status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr); - CHECK(status, FAIL, "H5Dread"); - - /* Verify correct data */ - /* ------------------- */ - for (i = 0; i < LENGTH; i++) { - for (j = 0; j < ALEN; j++) { - if (cf[i].a[j] != cfr[i].a[j]) { - 
TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i, - (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) { - TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]); - continue; - } /* end if */ - if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) { - TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Release memory resources */ - /* ------------------------ */ - for (i = 0; i < dtsinfo->nsubfields; i++) - free(dtsinfo->name[i]); - - /* Release IDs */ - /* ----------- */ - status = H5Tclose(type); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Sclose(space); - CHECK(status, FAIL, "H5Sclose"); - - status = H5Dclose(dataset); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Fclose(fid); - CHECK(status, FAIL, "H5Fclose"); - - /******************************/ - /* Reopen the file and update */ - /******************************/ - - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - dataset = H5Dopen2(fid, FIELDNAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - type = H5Tcreate(H5T_COMPOUND, sizeof(fld_t)); - CHECK(type, FAIL, "H5Tcreate"); - - array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, dima); - CHECK(array_dt, FAIL, "H5Tarray_create2"); - - status = H5Tinsert(type, "Two", HOFFSET(fld_t, b), array_dt); - CHECK(status, FAIL, "H5Tinsert"); - - /* Initialize the data to overwrite */ - /* -------------------------------- */ - for (i = 0; i < LENGTH; i++) - for (j = 0; j < ALEN; j++) - cf[i].b[j] = fld[i].b[j] = 1.313F; - - status = H5Dwrite(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fld); - CHECK(status, FAIL, "H5Dwrite"); - - /* Read just the field changed */ - status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fldr); - CHECK(status, FAIL, "H5Dread"); - - for (i = 0; i < LENGTH; i++) - for (j = 0; j < ALEN; j++) - if (!H5_FLT_ABS_EQUAL(fld[i].b[j], fldr[i].b[j])) { - TestErrPrintf("Field data doesn't match, fld[%d].b[%d]=%f, fldr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)fld[i].b[j], (int)i, (int)j, (double)fldr[i].b[j]); - continue; - } /* end if */ - - status = H5Tclose(type); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Tclose(array_dt); - CHECK(status, FAIL, "H5Tclose"); - - type = H5Dget_type(dataset); - CHECK(type, FAIL, "H5Dget_type"); - - /* Read the entire dataset again */ - status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr); - CHECK(status, FAIL, "H5Dread"); - - /* Verify correct data */ - /* ------------------- */ - for (i = 0; i < LENGTH; i++) { - for (j = 0; j < ALEN; j++) { - if (cf[i].a[j] != cfr[i].a[j]) { - TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i, - (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) { - TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]); - continue; - } /* end if */ - if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) { - TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)cf[i].c[j], (int)i, 
(int)j, (double)cfr[i].c[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - status = H5Dclose(dataset); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Tclose(type); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Fclose(fid); - CHECK(status, FAIL, "H5Fclose"); - - /****************************************************/ - /* Reopen the file and print out all the data again */ - /****************************************************/ - - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - dataset = H5Dopen2(fid, FIELDNAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - type = H5Dget_type(dataset); - CHECK(type, FAIL, "H5Dget_type"); - - /* Reset the data to read in */ - /* ------------------------- */ - memset(cfr, 0, sizeof(CmpField) * LENGTH); - - status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr); - CHECK(status, FAIL, "H5Dread"); - - /* Verify correct data */ - /* ------------------- */ - for (i = 0; i < LENGTH; i++) { - for (j = 0; j < ALEN; j++) { - if (cf[i].a[j] != cfr[i].a[j]) { - TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i, - (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) { - TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]); - continue; - } /* end if */ - if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) { - TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i, - (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - status = H5Dclose(dataset); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Tclose(type); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Fclose(fid); - CHECK(status, FAIL, "H5Fclose"); - - free(dtsinfo); -} /* end test_array_bkg() */ - -/*------------------------------------------------------------------------- - * Function: test_compat - * - * Purpose: Test array datatype compatibility code. - * - * Reads file containing old version of datatype object header - * messages for compound datatypes and verifies reading the older - * version of the is working correctly. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ -#if 0 -static void -test_compat(void) -{ - const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t tid1; /* Array Datatype ID */ - hid_t tid2; /* Datatype ID */ - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - H5T_class_t mclass; /* Datatype class for VL */ - int nmemb; /* Number of compound members */ - char *mname; /* Name of compound field */ - size_t off; /* Offset of compound field */ - hid_t mtid; /* Datatype ID for field */ - int i; /* Index variables */ - bool driver_is_default_compatible; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Array Datatypes Compatibility Functionality\n")); - - /* - * Try reading a file that has been prepared that has datasets with - * compound datatypes which use an older version (version 1) of the - * datatype object header message for describing the datatype. - * - * If this test fails and the datatype object header message version has - * changed, follow the instructions in gen_old_array.c for regenerating - * the tarrold.h5 file. - */ - - if (h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible) < 0) - TestErrPrintf("can't check if VFD is default VFD compatible\n"); - if (!driver_is_default_compatible) { - printf(" -- SKIPPED --\n"); - return; - } - - /* Open the testfile */ - fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_I(fid1, "H5Fopen"); - - /* Only try to proceed if the file is around */ - if (fid1 >= 0) { - /* Open the first dataset (with no array fields) */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK_I(dataset, "H5Dopen2"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK_I(tid1, "H5Dget_type"); - - /* Verify datatype class */ - mclass = H5Tget_class(tid1); - VERIFY(mclass, H5T_COMPOUND, "H5Tget_class"); - - /* Get the number of compound datatype fields */ - nmemb = H5Tget_nmembers(tid1); - VERIFY(nmemb, 3, "H5Tget_nmembers"); - - /* Check the 1st field's name */ - mname = H5Tget_member_name(tid1, 0); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "i") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 1st field's offset */ - off = H5Tget_member_offset(tid1, 0); - VERIFY(off, 0, "H5Tget_member_offset"); - - /* Check the 1st field's datatype */ - mtid = H5Tget_member_type(tid1, 0); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_STD_I16LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Check the 2nd field's name */ - mname = H5Tget_member_name(tid1, 1); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "f") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 2nd field's offset */ - off = H5Tget_member_offset(tid1, 1); - VERIFY(off, 4, "H5Tget_member_offset"); - - /* Check the 2nd field's datatype */ - mtid = H5Tget_member_type(tid1, 1); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_IEEE_F32LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = 
H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Check the 3rd field's name */ - mname = H5Tget_member_name(tid1, 2); - CHECK_PTR(mname, "H5Tget_member_name"); - if (strcmp(mname, "l") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - H5free_memory(mname); - - /* Check the 3rd field's offset */ - off = H5Tget_member_offset(tid1, 2); - VERIFY(off, 8, "H5Tget_member_offset"); - - /* Check the 3rd field's datatype */ - mtid = H5Tget_member_type(tid1, 2); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_STD_I32LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Close the datatype */ - ret = H5Tclose(tid1); - CHECK_I(ret, "H5Tclose"); - - /* Close the dataset */ - ret = H5Dclose(dataset); - CHECK_I(ret, "H5Dclose"); - - /* Open the second dataset (with array fields) */ - dataset = H5Dopen2(fid1, "Dataset2", H5P_DEFAULT); - CHECK_I(dataset, "H5Dopen2"); - - /* Get the datatype */ - tid1 = H5Dget_type(dataset); - CHECK_I(tid1, "H5Dget_type"); - - /* Verify datatype class */ - mclass = H5Tget_class(tid1); - VERIFY(mclass, H5T_COMPOUND, "H5Tget_class"); - - /* Get the number of compound datatype fields */ - nmemb = H5Tget_nmembers(tid1); - VERIFY(nmemb, 4, "H5Tget_nmembers"); - - /* Check the 1st field's name */ - mname = H5Tget_member_name(tid1, 0); - CHECK_PTR(mname, "H5Tget_member_name"); - if (mname && strcmp(mname, "i") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - if (mname) - H5free_memory(mname); - - /* Check the 1st field's offset */ - off = H5Tget_member_offset(tid1, 0); - VERIFY(off, 0, "H5Tget_member_offset"); - - /* Check the 1st field's datatype */ - mtid = H5Tget_member_type(tid1, 0); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_STD_I16LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Check the 2nd field's name */ - mname = H5Tget_member_name(tid1, 1); - CHECK_PTR(mname, "H5Tget_member_name"); - if (mname && strcmp(mname, "f") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - if (mname) - H5free_memory(mname); - - /* Check the 2nd field's offset */ - off = H5Tget_member_offset(tid1, 1); - VERIFY(off, 4, "H5Tget_member_offset"); - - /* Check the 2nd field's datatype */ - mtid = H5Tget_member_type(tid1, 1); - CHECK(mtid, FAIL, "H5Tget_member_type"); - - /* Verify datatype class */ - mclass = H5Tget_class(mtid); - VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(mtid); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(mtid, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Check the array's base datatype */ - tid2 = H5Tget_super(mtid); - CHECK(tid2, FAIL, "H5Tget_super"); - - if ((ret = H5Tequal(tid2, H5T_IEEE_F32LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(mtid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Check the 3rd field's name 
*/ - mname = H5Tget_member_name(tid1, 2); - CHECK_PTR(mname, "H5Tget_member_name"); - if (mname && strcmp(mname, "l") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - if (mname) - H5free_memory(mname); - - /* Check the 3rd field's offset */ - off = H5Tget_member_offset(tid1, 2); - VERIFY(off, 20, "H5Tget_member_offset"); - - /* Check the 3rd field's datatype */ - mtid = H5Tget_member_type(tid1, 2); - CHECK(mtid, FAIL, "H5Tget_member_type"); - - /* Verify datatype class */ - mclass = H5Tget_class(mtid); - VERIFY(mclass, H5T_ARRAY, "H5Tget_class"); - - /* Check the array rank */ - ndims = H5Tget_array_ndims(mtid); - VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims"); - - /* Get the array dimensions */ - ret = H5Tget_array_dims2(mtid, rdims1); - CHECK(ret, FAIL, "H5Tget_array_dims2"); - - /* Check the array dimensions */ - for (i = 0; i < ndims; i++) - if (rdims1[i] != tdims1[i]) { - TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE - ", tdims1[%d]=%" PRIuHSIZE "\n", - i, rdims1[i], i, tdims1[i]); - continue; - } /* end if */ - - /* Check the array's base datatype */ - tid2 = H5Tget_super(mtid); - CHECK(tid2, FAIL, "H5Tget_super"); - - if ((ret = H5Tequal(tid2, H5T_STD_I32LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(mtid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Check the 4th field's name */ - mname = H5Tget_member_name(tid1, 3); - CHECK_PTR(mname, "H5Tget_member_name"); - if (mname && strcmp(mname, "d") != 0) - TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname); - if (mname) - H5free_memory(mname); - - /* Check the 4th field's offset */ - off = H5Tget_member_offset(tid1, 3); - VERIFY(off, 36, "H5Tget_member_offset"); - - /* Check the 4th field's datatype */ - mtid = H5Tget_member_type(tid1, 3); - CHECK(mtid, FAIL, "H5Tget_member_type"); - if ((ret = H5Tequal(mtid, H5T_IEEE_F64LE)) <= 0) - TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret); - ret = H5Tclose(mtid); - CHECK(mtid, FAIL, "H5Tclose"); - - /* Close the datatype */ - ret = H5Tclose(tid1); - CHECK_I(ret, "H5Tclose"); - - /* Close the dataset */ - ret = H5Dclose(dataset); - CHECK_I(ret, "H5Dclose"); - - /* Close the file */ - ret = H5Fclose(fid1); - CHECK_I(ret, "H5Fclose"); - } /* end if */ - else - printf("***cannot open the pre-created compound datatype test file (%s)\n", testfile); - -} /* end test_compat() */ -#endif - -/*------------------------------------------------------------------------- - * Function: test_array - * - * Purpose: Main array datatype testing routine. - * - * Return: void - * - *------------------------------------------------------------------------- - */ -void -test_array(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing Array Datatypes\n")); - - /* These tests use the same file... 
*/ - test_array_atomic_1d(); /* Test 1-D array of atomic datatypes */ - test_array_atomic_3d(); /* Test 3-D array of atomic datatypes */ - test_array_array_atomic(); /* Test 1-D array of 2-D arrays of atomic datatypes */ - test_array_compound_atomic(); /* Test 1-D array of compound datatypes (with no array fields) */ - test_array_compound_array(); /* Test 1-D array of compound datatypes (with array fields) */ - test_array_vlen_atomic(); /* Test 1-D array of atomic VL datatypes */ - test_array_vlen_array(); /* Test 1-D array of 1-D array VL datatypes */ - test_array_funcs(); /* Test type functions with array types */ - - test_array_bkg(); /* Read compound datatype with array fields and background fields read */ -#if 0 - /* This test uses a custom file */ - test_compat(); /* Test compatibility changes for compound datatype fields */ -#endif -} /* end test_array() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_array - * - * Purpose: Cleanup temporary test files - * - * Return: void - * - *------------------------------------------------------------------------- - */ -void -cleanup_array(void) -{ - H5Fdelete(FILENAME, H5P_DEFAULT); -} /* end cleanup_array() */ diff --git a/test/API/tattr.c b/test/API/tattr.c deleted file mode 100644 index c4ae9f90fa2..00000000000 --- a/test/API/tattr.c +++ /dev/null @@ -1,11923 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tattr - * - * Test the attribute functionality - * - *************************************************************/ - -#include "testhdf5.h" - -#if 0 -#include "H5VLnative_private.h" - -/* - * This file needs to access private information from the H5O package. - * This file also needs to access the object header testing code. - */ -#define H5O_FRIEND /*suppress error about including H5Opkg */ -#define H5O_TESTING -#include "H5Opkg.h" /* Object headers */ - -/* - * This file needs to access private information from the H5A package. - * This file also needs to access the attribute testing code. - */ -#define H5A_FRIEND /*suppress error about including H5Apkg */ -#define H5A_TESTING -#include "H5Apkg.h" /* Attributes */ - -/* - * This file needs to access private information from the H5F package. - * This file also needs to access the file testing code. 
- */ -#define H5F_FRIEND /*suppress error about including H5Fpkg */ -#define H5F_TESTING -#include "H5Fpkg.h" /* File access */ -#endif - -#define FILENAME "tattr.h5" -#define NAME_BUF_SIZE 1024 -#define ATTR_NAME_LEN 16 -#define ATTR_MAX_DIMS 7 -#define ATTR_TMP_NAME "a really long temp_name" -#define CORDER_ITER_STOP 3 - -/* 3-D dataset with fixed dimensions */ -#define SPACE1_RANK 3 -#define SPACE1_DIM1 3 -#define SPACE1_DIM2 15 -#define SPACE1_DIM3 13 - -/* Dataset Information */ -#define DSET1_NAME "Dataset1" -#define DSET2_NAME "Dataset2" -#define DSET3_NAME "Dataset3" -#define NUM_DSETS 3 - -/* Group Information */ -#define GROUP1_NAME "/Group1" -#define GROUP2_NAME "/Group2" -#define GROUP3_NAME "/Group3" - -/* Named Datatype Information */ -#define TYPE1_NAME "/Type" - -/* Attribute Rank & Dimensions */ -#define ATTR1_NAME "Attr1" -#define ATTR1_RANK 1 -#define ATTR1_DIM1 3 -int attr_data1[ATTR1_DIM1] = {512, -234, 98123}; /* Test data for 1st attribute */ - -/* rank & dimensions for another attribute */ -#define ATTR1A_NAME "Attr1_a" -int attr_data1a[ATTR1_DIM1] = {256, 11945, -22107}; - -#define ATTR2_NAME "Attr2" -#define ATTR2_RANK 2 -#define ATTR2_DIM1 2 -#define ATTR2_DIM2 2 -int attr_data2[ATTR2_DIM1][ATTR2_DIM2] = {{7614, -416}, {197814, -3}}; /* Test data for 2nd attribute */ - -#define ATTR3_NAME "Attr3" -#define ATTR3_RANK 3 -#define ATTR3_DIM1 2 -#define ATTR3_DIM2 2 -#define ATTR3_DIM3 2 -double attr_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3] = { - {{2.3, -26.1}, {0.123, -10.0}}, {{973.23, -0.91827}, {2.0, 23.0}}}; /* Test data for 3rd attribute */ - -#define ATTR4_NAME "Attr4" -#define ATTR4_RANK 2 -#define ATTR4_DIM1 2 -#define ATTR4_DIM2 2 -#define ATTR4_FIELDNAME1 "i" -#define ATTR4_FIELDNAME2 "d" -#define ATTR4_FIELDNAME3 "c" -size_t attr4_field1_off = 0; -size_t attr4_field2_off = 0; -size_t attr4_field3_off = 0; -struct attr4_struct { - int i; - double d; - char c; -} attr_data4[ATTR4_DIM1][ATTR4_DIM2] = { - {{3, -26.1, 'd'}, {-100000, 0.123, '3'}}, - {{-23, 981724.2, 'Q'}, {0, 2.0, '\n'}}}; /* Test data for 4th attribute */ - -#define ATTR5_NAME "Attr5" -#define ATTR5_RANK 0 -float attr_data5 = -5.123F; /* Test data for 5th attribute */ - -#define ATTR6_RANK 3 -#define ATTR6_DIM1 100 -#define ATTR6_DIM2 100 -#define ATTR6_DIM3 100 - -#define ATTR7_NAME "attr 1 - 000000" -#define ATTR8_NAME "attr 2" - -#define LINK1_NAME "Link1" - -#define NATTR_MANY_OLD 350 -#define NATTR_MANY_NEW 3500 - -#define BUG2_NATTR 100 -#define BUG2_NATTR2 16 - -#define BUG3_DSET_NAME "dset" -#define BUG3_DT_NAME "dt" -#define BUG3_ATTR_NAME "attr" - -/* Used by test_attr_delete_last_dense() */ -#define GRPNAME "grp" -#define ATTRNAME "attr" -#define DIM0 100 -#define DIM1 100 -#define RANK 2 - -/* Used by test_attr_info_null_info_pointer() */ -#define GET_INFO_NULL_POINTER_ATTR_NAME "NullInfoPointerAttr" - -/* Used by test_attr_rename_invalid_name() */ -#define INVALID_RENAME_TEST_ATTR_NAME "InvalidRenameTestAttr" -#define INVALID_RENAME_TEST_NEW_ATTR_NAME "InvalidRenameTestNewAttr" - -/* Used by test_attr_get_name_invalid_buf() */ -#define GET_NAME_INVALID_BUF_TEST_ATTR_NAME "InvalidNameBufferTestAttr" - -/* Attribute iteration struct */ -typedef struct { - H5_iter_order_t order; /* Direction of iteration */ - unsigned ncalled; /* # of times callback is entered */ - unsigned nskipped; /* # of attributes skipped */ - int stop; /* # of iterations to stop after */ - hsize_t curr; /* Current creation order value */ - size_t max_visit; /* Size of "visited attribute" flag array */ - bool 
*visited; /* Pointer to array of "visited attribute" flags */ -} attr_iter_info_t; - -static herr_t attr_op1(hid_t loc_id, const char *name, const H5A_info_t *ainfo, void *op_data); - -/* Global dcpl ID, can be re-set as a generated dcpl for various operations - * across multiple tests. - * e.g., minimized dataset object headers - */ -static hid_t dcpl_g = H5P_DEFAULT; - -/**************************************************************** -** -** test_attr_basic_write(): Test basic H5A (attribute) code. -** Tests integer attributes on both datasets and groups -** -****************************************************************/ -static void -test_attr_basic_write(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t attr, attr2; /* Attribute ID */ -#if 0 - hsize_t attr_size; /* storage size for attribute */ -#endif - ssize_t attr_name_size; /* size of attribute name */ - char *attr_name = NULL; /* name of attribute */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {ATTR1_DIM1}; - hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2}; - int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ - int i; - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Scalar Attribute Writing Functions\n")); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Create dataspace for attribute */ - sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Try to create an attribute on the file (should create an attribute on root group) */ - attr = H5Acreate2(fid1, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open the root group */ - group = H5Gopen2(fid1, "/", H5P_DEFAULT); - CHECK(group, FAIL, "H5Gopen2"); - - /* Open attribute again */ - attr = H5Aopen(group, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close root group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write attribute information */ - ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); - CHECK(ret, FAIL, "H5Awrite"); - - /* Create an another attribute for the dataset */ - attr2 = H5Acreate2(dataset, ATTR1A_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write attribute information */ - ret = H5Awrite(attr2, H5T_NATIVE_INT, 
attr_data1a); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check storage size for attribute */ - attr_size = H5Aget_storage_size(attr); - VERIFY(attr_size, (ATTR1_DIM1 * sizeof(int)), "H5A_get_storage_size"); -#endif - /* Read attribute information immediately, without closing attribute */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close attribute */ - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* change attribute name */ - ret = H5Arename(dataset, ATTR1_NAME, ATTR_TMP_NAME); - CHECK(ret, FAIL, "H5Arename"); - - /* Open attribute again */ - attr = H5Aopen(dataset, ATTR_TMP_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Verify new attribute name */ - attr_name_size = H5Aget_name(attr, (size_t)0, NULL); - CHECK(attr_name_size, FAIL, "H5Aget_name"); - - if (attr_name_size > 0) { - attr_name = (char *)calloc((size_t)(attr_name_size + 1), sizeof(char)); - CHECK_PTR(attr_name, "calloc"); - - if (attr_name) { - ret = (herr_t)H5Aget_name(attr, (size_t)(attr_name_size + 1), attr_name); - CHECK(ret, FAIL, "H5Aget_name"); - ret = strcmp(attr_name, ATTR_TMP_NAME); - VERIFY(ret, 0, "strcmp"); - - free(attr_name); - attr_name = NULL; - } /* end if */ - } /* end if */ - - /* Read attribute information immediately, without closing attribute */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open the second attribute again */ - attr2 = H5Aopen(dataset, ATTR1A_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Verify new attribute name */ - attr_name_size = H5Aget_name(attr2, (size_t)0, NULL); - CHECK(attr_name_size, FAIL, "H5Aget_name"); - - if (attr_name_size > 0) { - attr_name = (char *)calloc((size_t)(attr_name_size + 1), sizeof(char)); - CHECK_PTR(attr_name, "calloc"); - - if (attr_name) { - ret = (herr_t)H5Aget_name(attr2, (size_t)(attr_name_size + 1), attr_name); - CHECK(ret, FAIL, "H5Aget_name"); - ret = strcmp(attr_name, ATTR1A_NAME); - VERIFY(ret, 0, "strcmp"); - - free(attr_name); - attr_name = NULL; - } /* end if */ - } /* end if */ - - /* Read attribute information immediately, without closing attribute */ - ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1a[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d, read_data1[%d]=%d\n", __LINE__, - i, attr_data1a[i], i, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create group */ - group = H5Gcreate2(fid1, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, FAIL, "H5Gcreate2"); 
- - /* Create dataspace for attribute */ - sid2 = H5Screate_simple(ATTR2_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create an attribute for the group */ - attr = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check storage size for attribute */ - attr_size = H5Aget_storage_size(attr); - VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size"); -#endif - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write attribute information */ - ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check storage size for attribute */ - attr_size = H5Aget_storage_size(attr); - VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5A_get_storage_size"); -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Attribute dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_basic_write() */ - -/**************************************************************** -** -** test_attr_basic_read(): Test basic H5A (attribute) code. -** -****************************************************************/ -static void -test_attr_basic_read(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t attr; /* Attribute ID */ - H5O_info2_t oinfo; /* Object info */ - int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ - int read_data2[ATTR2_DIM1][ATTR2_DIM2] = {{0}}; /* Buffer for reading 2nd attribute */ - int i, j; /* Local index variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Functions\n")); - - /* Create file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 2, "H5Oget_info3"); - - /* Open first attribute for the dataset */ - attr = H5Aopen(dataset, ATTR_TMP_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open the group */ - group = H5Gopen2(fid1, GROUP1_NAME, H5P_DEFAULT); - CHECK(group, FAIL, "H5Gopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(group, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 1, 
"H5Oget_info3"); - - /* Open the attribute for the group */ - attr = H5Aopen(group, ATTR2_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data2); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR2_DIM1; i++) - for (j = 0; j < ATTR2_DIM2; j++) - if (attr_data2[i][j] != read_data2[i][j]) - TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n", - __LINE__, i, j, attr_data2[i][j], i, j, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_basic_read() */ - -/**************************************************************** -** -** test_attr_flush(): Test H5A (attribute) code for performing -** I/O when H5Fflush is used. -** -****************************************************************/ -static void -test_attr_flush(hid_t fapl) -{ - hid_t fil, /* File ID */ - att, /* Attribute ID */ - spc, /* Dataspace ID */ - set; /* Dataset ID */ - double wdata = 3.14159; /* Data to write */ - double rdata; /* Data read in */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Attribute Flushing\n")); - - fil = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fil, FAIL, "H5Fcreate"); - - spc = H5Screate(H5S_SCALAR); - CHECK(spc, FAIL, "H5Screate"); - - set = H5Dcreate2(fil, DSET1_NAME, H5T_NATIVE_DOUBLE, spc, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(set, FAIL, "H5Dcreate2"); - - att = H5Acreate2(set, ATTR1_NAME, H5T_NATIVE_DOUBLE, spc, H5P_DEFAULT, H5P_DEFAULT); - CHECK(att, FAIL, "H5Acreate2"); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILL_VALUES) && - (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { - ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); - CHECK(ret, FAIL, "H5Aread"); - - if (!H5_DBL_ABS_EQUAL(rdata, 0.0)) - TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0); - - ret = H5Fflush(fil, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); - CHECK(ret, FAIL, "H5Awrite"); - - if (!H5_DBL_ABS_EQUAL(rdata, 0.0)) - TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0); - } - else { - printf("** SKIPPED attribute pre-read due to fill values not being supported **\n"); - } - - ret = H5Awrite(att, H5T_NATIVE_DOUBLE, &wdata); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata); - CHECK(ret, FAIL, "H5Awrite"); - - if (!H5_DBL_ABS_EQUAL(rdata, wdata)) - TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, wdata); - - ret = H5Sclose(spc); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Aclose(att); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Dclose(set); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fil); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_flush() */ - -/**************************************************************** -** -** test_attr_plist(): Test Attribute Creation Property Lists -** -****************************************************************/ -static void -test_attr_plist(hid_t fapl) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t did = H5I_INVALID_HID; /* Dataset ID */ - hid_t dsid = H5I_INVALID_HID; /* Dataspace ID (for dataset) */ - hid_t asid = 
H5I_INVALID_HID; /* Dataspace ID (for attribute) */ - hid_t aid = H5I_INVALID_HID; /* Attribute ID */ - hid_t acpl_id = H5I_INVALID_HID; /* Attribute creation property list ID */ - hid_t aapl_id = H5I_INVALID_HID; /* Attribute access property list ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {ATTR1_DIM1}; - H5T_cset_t cset; /* Character set for attributes */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Attribute Property Lists\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for dataset */ - dsid = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(dsid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset */ - did = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, dsid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create dataspace for attribute */ - asid = H5Screate_simple(ATTR1_RANK, dims2, NULL); - CHECK(asid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create default creation property list for attribute */ - acpl_id = H5Pcreate(H5P_ATTRIBUTE_CREATE); - CHECK(acpl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create default access property list for attribute - * This currently has no properties, but we need to test its creation - * and use. - */ - aapl_id = H5Pcreate(H5P_ATTRIBUTE_ACCESS); - CHECK(aapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Get the character encoding and ensure that it is the default (ASCII) */ - ret = H5Pget_char_encoding(acpl_id, &cset); - CHECK(ret, FAIL, "H5Pget_char_encoding"); - VERIFY(cset, H5T_CSET_ASCII, "H5Pget_char_encoding"); - - /* Create an attribute for the dataset using the property list */ - aid = H5Acreate2(did, ATTR1_NAME, H5T_NATIVE_INT, asid, acpl_id, aapl_id); - CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); - - /* Close the property list, and get the attribute's creation property list */ - ret = H5Pclose(acpl_id); - CHECK(ret, FAIL, "H5Pclose"); - acpl_id = H5Aget_create_plist(aid); - CHECK(acpl_id, H5I_INVALID_HID, "H5Aget_create_plist"); - - /* Get the character encoding and ensure that it is the default (ASCII) */ - ret = H5Pget_char_encoding(acpl_id, &cset); - CHECK(ret, FAIL, "H5Pget_char_encoding"); - VERIFY(cset, H5T_CSET_ASCII, "H5Pget_char_encoding"); - - /* Close the property list and attribute */ - ret = H5Pclose(acpl_id); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create a new property list and modify it to use a different encoding */ - acpl_id = H5Pcreate(H5P_ATTRIBUTE_CREATE); - CHECK(acpl_id, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_char_encoding(acpl_id, H5T_CSET_UTF8); - CHECK(ret, FAIL, "H5Pset_char_encoding"); - - /* Get the character encoding and ensure that it has been changed */ - ret = H5Pget_char_encoding(acpl_id, &cset); - CHECK(ret, FAIL, "H5Pget_char_encoding"); - VERIFY(cset, H5T_CSET_UTF8, "H5Pget_char_encoding"); - - /* Create an attribute for the dataset using the modified property list */ - aid = H5Acreate2(did, ATTR2_NAME, H5T_NATIVE_INT, asid, acpl_id, aapl_id); - CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); - - /* Close the property list and attribute */ - ret = H5Pclose(acpl_id); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Re-open the second attribute and ensure that its character encoding is correct */ - aid = H5Aopen(did, ATTR2_NAME, 
H5P_DEFAULT); - CHECK(aid, H5I_INVALID_HID, "H5Aopen"); - acpl_id = H5Aget_create_plist(aid); - CHECK(acpl_id, H5I_INVALID_HID, "H5Aget_create_plist"); - ret = H5Pget_char_encoding(acpl_id, &cset); - CHECK(ret, FAIL, "H5Pget_char_encoding"); - VERIFY(cset, H5T_CSET_UTF8, "H5Pget_char_encoding"); - - /* Close everything */ - ret = H5Sclose(dsid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(asid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(aapl_id); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(acpl_id); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_plist() */ - -/**************************************************************** -** -** test_attr_compound_write(): Test H5A (attribute) code. -** Tests compound datatype attributes -** -****************************************************************/ -static void -test_attr_compound_write(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t tid1; /* Attribute datatype ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {ATTR4_DIM1, ATTR4_DIM2}; - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Multiple Attribute Functions\n")); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataset's dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create the attribute datatype. 
*/ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct attr4_struct)); - CHECK(tid1, FAIL, "H5Tcreate"); - attr4_field1_off = HOFFSET(struct attr4_struct, i); - ret = H5Tinsert(tid1, ATTR4_FIELDNAME1, attr4_field1_off, H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - attr4_field2_off = HOFFSET(struct attr4_struct, d); - ret = H5Tinsert(tid1, ATTR4_FIELDNAME2, attr4_field2_off, H5T_NATIVE_DOUBLE); - CHECK(ret, FAIL, "H5Tinsert"); - attr4_field3_off = HOFFSET(struct attr4_struct, c); - ret = H5Tinsert(tid1, ATTR4_FIELDNAME3, attr4_field3_off, H5T_NATIVE_SCHAR); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create dataspace for 1st attribute */ - sid2 = H5Screate_simple(ATTR4_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create complex attribute for the dataset */ - attr = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write complex attribute data */ - ret = H5Awrite(attr, tid1, attr_data4); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close attribute's dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close attribute's datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_compound_write() */ - -/**************************************************************** -** -** test_attr_compound_read(): Test basic H5A (attribute) code. 
-** -****************************************************************/ -static void -test_attr_compound_read(hid_t fapl) -{ - hid_t fid1; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t space; /* Attribute dataspace */ - hid_t type; /* Attribute datatype */ - hid_t attr; /* Attribute ID */ - char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */ - int rank; /* Attribute rank */ - hsize_t dims[ATTR_MAX_DIMS]; /* Attribute dimensions */ - H5T_class_t t_class; /* Attribute datatype class */ - H5T_order_t order; /* Attribute datatype order */ - size_t size; /* Attribute datatype size as stored in file */ - int fields; /* # of Attribute datatype fields */ - char *fieldname; /* Name of a field */ - size_t offset; /* Attribute datatype field offset */ - hid_t field; /* Attribute field datatype */ - struct attr4_struct read_data4[ATTR4_DIM1][ATTR4_DIM2]; /* Buffer for reading 4th attribute */ - ssize_t name_len; /* Length of attribute name */ - H5O_info2_t oinfo; /* Object info */ - int i, j; /* Local index variables */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Functions\n")); - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 1, "H5Oget_info3"); - - /* Open 1st attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Dataspace */ - space = H5Aget_space(attr); - CHECK(space, FAIL, "H5Aget_space"); - rank = H5Sget_simple_extent_ndims(space); - VERIFY(rank, ATTR4_RANK, "H5Sget_simple_extent_ndims"); - ret = H5Sget_simple_extent_dims(space, dims, NULL); - CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); - if (dims[0] != ATTR4_DIM1) - TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR4_DIM1); - if (dims[1] != ATTR4_DIM2) - TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR4_DIM2); - H5Sclose(space); - - /* Verify Datatype */ - type = H5Aget_type(attr); - CHECK(type, FAIL, "H5Aget_type"); - t_class = H5Tget_class(type); - VERIFY(t_class, H5T_COMPOUND, "H5Tget_class"); - fields = H5Tget_nmembers(type); - VERIFY(fields, 3, "H5Tget_nmembers"); - for (i = 0; i < fields; i++) { - fieldname = H5Tget_member_name(type, (unsigned)i); - if (!(strcmp(fieldname, ATTR4_FIELDNAME1) != 0 || strcmp(fieldname, ATTR4_FIELDNAME2) != 0 || - strcmp(fieldname, ATTR4_FIELDNAME3) != 0)) - TestErrPrintf("invalid field name for field #%d: %s\n", i, fieldname); - H5free_memory(fieldname); - } /* end for */ - offset = H5Tget_member_offset(type, 0); - VERIFY(offset, attr4_field1_off, "H5Tget_member_offset"); - offset = H5Tget_member_offset(type, 1); - VERIFY(offset, attr4_field2_off, "H5Tget_member_offset"); - offset = H5Tget_member_offset(type, 2); - VERIFY(offset, attr4_field3_off, "H5Tget_member_offset"); - - /* Verify each field's type, class & size */ - field = H5Tget_member_type(type, 0); - CHECK(field, FAIL, "H5Tget_member_type"); - t_class = H5Tget_class(field); - VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); - order = H5Tget_order(field); - VERIFY_TYPE(order, 
H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order"); - size = H5Tget_size(field); - VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size"); - H5Tclose(field); - field = H5Tget_member_type(type, 1); - CHECK(field, FAIL, "H5Tget_member_type"); - t_class = H5Tget_class(field); - VERIFY(t_class, H5T_FLOAT, "H5Tget_class"); - order = H5Tget_order(field); - VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_DOUBLE), H5T_order_t, "%d", "H5Tget_order"); - size = H5Tget_size(field); - VERIFY(size, H5Tget_size(H5T_NATIVE_DOUBLE), "H5Tget_size"); - H5Tclose(field); - field = H5Tget_member_type(type, 2); - CHECK(field, FAIL, "H5Tget_member_type"); - t_class = H5Tget_class(field); - VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); - order = H5Tget_order(field); - VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_SCHAR), H5T_order_t, "%d", "H5Tget_order"); - size = H5Tget_size(field); - VERIFY(size, H5Tget_size(H5T_NATIVE_SCHAR), "H5Tget_size"); - H5Tclose(field); - - /* Read attribute information */ - ret = H5Aread(attr, type, read_data4); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR4_DIM1; i++) - for (j = 0; j < ATTR4_DIM2; j++) - if (memcmp(&attr_data4[i][j], &read_data4[i][j], sizeof(struct attr4_struct)) != 0) { - printf("%d: attribute data different: attr_data4[%d][%d].i=%d, read_data4[%d][%d].i=%d\n", - __LINE__, i, j, attr_data4[i][j].i, i, j, read_data4[i][j].i); - printf("%d: attribute data different: attr_data4[%d][%d].d=%f, read_data4[%d][%d].d=%f\n", - __LINE__, i, j, attr_data4[i][j].d, i, j, read_data4[i][j].d); - TestErrPrintf( - "%d: attribute data different: attr_data4[%d][%d].c=%c, read_data4[%d][%d].c=%c\n", - __LINE__, i, j, attr_data4[i][j].c, i, j, read_data4[i][j].c); - } /* end if */ - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR4_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR4_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR4_NAME); - - /* Close attribute datatype */ - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_compound_read() */ - -/**************************************************************** -** -** test_attr_scalar_write(): Test scalar H5A (attribute) writing code. 
-** -****************************************************************/ -static void -test_attr_scalar_write(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Functions\n")); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Create dataspace for attribute */ - sid2 = H5Screate_simple(ATTR5_RANK, NULL, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write attribute information */ - ret = H5Awrite(attr, H5T_NATIVE_FLOAT, &attr_data5); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_scalar_write() */ - -/**************************************************************** -** -** test_attr_scalar_read(): Test scalar H5A (attribute) reading code. -** -****************************************************************/ -static void -test_attr_scalar_read(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - H5S_class_t stype; /* Dataspace class */ - float rdata = 0.0F; /* Buffer for reading 1st attribute */ - H5O_info2_t oinfo; /* Object info */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Scalar Attribute Reading Functions\n")); - - /* Create file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 1, "H5Oget_info3"); - - /* Open an attribute for the dataset */ - attr = H5Aopen(dataset, ATTR5_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_FLOAT, &rdata); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify the floating-poing value in this way to avoid compiler warning. 
*/ - if (!H5_FLT_ABS_EQUAL(rdata, attr_data5)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Aread", - (double)attr_data5, (double)rdata, (int)__LINE__, __FILE__); - - /* Get the attribute's dataspace */ - sid = H5Aget_space(attr); - CHECK(sid, FAIL, "H5Aget_space"); - - /* Make certain the dataspace is scalar */ - stype = H5Sget_simple_extent_type(sid); - VERIFY(stype, H5S_SCALAR, "H5Sget_simple_extent_type"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_scalar_read() */ - -/**************************************************************** -** -** test_attr_mult_write(): Test basic H5A (attribute) code. -** Tests integer attributes on both datasets and groups -** -****************************************************************/ -static void -test_attr_mult_write(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {ATTR1_DIM1}; - hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2}; - hsize_t dims4[] = {ATTR3_DIM1, ATTR3_DIM2, ATTR3_DIM3}; - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Multiple Attribute Functions\n")); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataset's dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for 1st attribute */ - sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create 1st attribute for the dataset */ - attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write 1st attribute data */ - ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close 1st attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close attribute's dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for 2nd attribute */ - sid2 = H5Screate_simple(ATTR2_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create 2nd attribute for the dataset */ - attr = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, 
sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write 2nd attribute information */ - ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close 2nd attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close 2nd attribute's dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for 3rd attribute */ - sid2 = H5Screate_simple(ATTR3_RANK, dims4, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create 3rd attribute for the dataset */ - attr = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to create the same attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret_id = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Acreate2"); - } - - /* Write 3rd attribute information */ - ret = H5Awrite(attr, H5T_NATIVE_DOUBLE, attr_data3); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close 3rd attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close 3rd attribute's dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_mult_write() */ - -/**************************************************************** -** -** test_attr_mult_read(): Test basic H5A (attribute) code. -** -****************************************************************/ -static void -test_attr_mult_read(hid_t fapl) -{ - hid_t fid1; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t space; /* Attribute dataspace */ - hid_t type; /* Attribute datatype */ - hid_t attr; /* Attribute ID */ - char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */ - char temp_name[ATTR_NAME_LEN]; /* Buffer for mangling attribute names */ - int rank; /* Attribute rank */ - hsize_t dims[ATTR_MAX_DIMS]; /* Attribute dimensions */ - H5T_class_t t_class; /* Attribute datatype class */ - H5T_order_t order; /* Attribute datatype order */ - size_t size; /* Attribute datatype size as stored in file */ - int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ - int read_data2[ATTR2_DIM1][ATTR2_DIM2] = {{0}}; /* Buffer for reading 2nd attribute */ - double read_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3] = {{{0}}}; /* Buffer for reading 3rd attribute */ - ssize_t name_len; /* Length of attribute name */ - H5O_info2_t oinfo; /* Object info */ - int i, j, k; /* Local index values */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Functions\n")); - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); - - /* Open 1st attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Dataspace */ - space = H5Aget_space(attr); - 
CHECK(space, FAIL, "H5Aget_space"); - rank = H5Sget_simple_extent_ndims(space); - VERIFY(rank, ATTR1_RANK, "H5Sget_simple_extent_ndims"); - ret = H5Sget_simple_extent_dims(space, dims, NULL); - CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); - if (dims[0] != ATTR1_DIM1) - TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR1_DIM1); - H5Sclose(space); - - /* Verify Datatype */ - type = H5Aget_type(attr); - CHECK(type, FAIL, "H5Aget_type"); - t_class = H5Tget_class(type); - VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); - order = H5Tget_order(type); - VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order"); - size = H5Tget_size(type); - VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size"); - H5Tclose(type); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR1_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR1_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR1_NAME); - - /* Verify Name with too small of a buffer */ - name_len = H5Aget_name(attr, strlen(ATTR1_NAME), attr_name); - VERIFY(name_len, strlen(ATTR1_NAME), "H5Aget_name"); - strcpy(temp_name, ATTR1_NAME); /* make a copy of the name */ - temp_name[strlen(ATTR1_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */ - if (strcmp(attr_name, temp_name) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open 2nd attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)1, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Dataspace */ - space = H5Aget_space(attr); - CHECK(space, FAIL, "H5Aget_space"); - rank = H5Sget_simple_extent_ndims(space); - VERIFY(rank, ATTR2_RANK, "H5Sget_simple_extent_ndims"); - ret = H5Sget_simple_extent_dims(space, dims, NULL); - CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); - if (dims[0] != ATTR2_DIM1) - TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR2_DIM1); - if (dims[1] != ATTR2_DIM2) - TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR2_DIM2); - H5Sclose(space); - - /* Verify Datatype */ - type = H5Aget_type(attr); - CHECK(type, FAIL, "H5Aget_type"); - t_class = H5Tget_class(type); - VERIFY(t_class, H5T_INTEGER, "H5Tget_class"); - order = H5Tget_order(type); - VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order"); - size = H5Tget_size(type); - VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size"); - H5Tclose(type); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data2); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR2_DIM1; i++) - for (j = 0; j < ATTR2_DIM2; j++) - if (attr_data2[i][j] != read_data2[i][j]) - TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n", - __LINE__, i, j, attr_data2[i][j], i, j, 
read_data2[i][j]); - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR2_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR2_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR2_NAME); - - /* Verify Name with too small of a buffer */ - name_len = H5Aget_name(attr, strlen(ATTR2_NAME), attr_name); - VERIFY(name_len, strlen(ATTR2_NAME), "H5Aget_name"); - strcpy(temp_name, ATTR2_NAME); /* make a copy of the name */ - temp_name[strlen(ATTR2_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */ - if (strcmp(attr_name, temp_name) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open 2nd attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Dataspace */ - space = H5Aget_space(attr); - CHECK(space, FAIL, "H5Aget_space"); - rank = H5Sget_simple_extent_ndims(space); - VERIFY(rank, ATTR3_RANK, "H5Sget_simple_extent_ndims"); - ret = H5Sget_simple_extent_dims(space, dims, NULL); - CHECK(ret, FAIL, "H5Sget_simple_extent_dims"); - if (dims[0] != ATTR3_DIM1) - TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR3_DIM1); - if (dims[1] != ATTR3_DIM2) - TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR3_DIM2); - if (dims[2] != ATTR3_DIM3) - TestErrPrintf("attribute dimensions different: dims[2]=%d, should be %d\n", (int)dims[2], ATTR3_DIM3); - H5Sclose(space); - - /* Verify Datatype */ - type = H5Aget_type(attr); - CHECK(type, FAIL, "H5Aget_type"); - t_class = H5Tget_class(type); - VERIFY(t_class, H5T_FLOAT, "H5Tget_class"); - order = H5Tget_order(type); - VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_DOUBLE), H5T_order_t, "%d", "H5Tget_order"); - size = H5Tget_size(type); - VERIFY(size, H5Tget_size(H5T_NATIVE_DOUBLE), "H5Tget_size"); - H5Tclose(type); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_DOUBLE, read_data3); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR3_DIM1; i++) - for (j = 0; j < ATTR3_DIM2; j++) - for (k = 0; k < ATTR3_DIM3; k++) - if (!H5_DBL_ABS_EQUAL(attr_data3[i][j][k], read_data3[i][j][k])) - TestErrPrintf("%d: attribute data different: attr_data3[%d][%d][%d]=%f, " - "read_data3[%d][%d][%d]=%f\n", - __LINE__, i, j, k, attr_data3[i][j][k], i, j, k, read_data3[i][j][k]); - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR3_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR3_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME); - - /* Verify Name with too small of a buffer */ - name_len = H5Aget_name(attr, strlen(ATTR3_NAME), attr_name); - VERIFY(name_len, strlen(ATTR3_NAME), "H5Aget_name"); - strcpy(temp_name, ATTR3_NAME); /* make a copy of the name */ - temp_name[strlen(ATTR3_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */ - if (strcmp(attr_name, temp_name) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, 
"H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_mult_read() */ - -/**************************************************************** -** -** attr_op1(): Attribute operator -** -****************************************************************/ -static herr_t -attr_op1(hid_t H5_ATTR_UNUSED loc_id, const char *name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *op_data) -{ - int *count = (int *)op_data; - herr_t ret = 0; - - switch (*count) { - case 0: - if (strcmp(name, ATTR1_NAME) != 0) - TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR1_NAME); - (*count)++; - break; - - case 1: - if (strcmp(name, ATTR2_NAME) != 0) - TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR2_NAME); - (*count)++; - break; - - case 2: - if (strcmp(name, ATTR3_NAME) != 0) - TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR3_NAME); - (*count)++; - break; - - default: - ret = -1; - break; - } /* end switch() */ - - return (ret); -} /* end attr_op1() */ - -/**************************************************************** -** -** test_attr_iterate(): Test H5A (attribute) iterator code. -** -****************************************************************/ -static void -test_attr_iterate(hid_t fapl) -{ - hid_t file; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - int count; /* operator data for the iterator */ - H5O_info2_t oinfo; /* Object info */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Functions\n")); - - /* Open file */ - file = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(file, FAIL, "H5Fopen"); - - /* Create a dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a new dataset */ - dataset = H5Dcreate2(file, DSET2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 0, "H5Oget_info3"); - - /* Iterate over attributes on dataset */ - count = 0; - ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_op1, &count); - VERIFY(ret, 0, "H5Aiterate2"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open existing dataset w/attributes */ - dataset = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); - - /* Iterate over attributes on dataset */ - count = 0; - ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_op1, &count); - VERIFY(ret, 0, "H5Aiterate2"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_iterate() */ - -/**************************************************************** -** -** test_attr_delete(): Test H5A (attribute) code for deleting objects. 
-** -****************************************************************/ -static void -test_attr_delete(hid_t fapl) -{ - hid_t fid1; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t attr; /* Attribute ID */ - char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */ - ssize_t name_len; /* Length of attribute name */ - H5O_info2_t oinfo; /* Object info */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Attribute Deletion Functions\n")); - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Try to delete bogus attribute */ - H5E_BEGIN_TRY - { - ret = H5Adelete(dataset, "Bogus"); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Adelete"); - } - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 3, "H5Oget_info3"); - - /* Delete middle (2nd) attribute */ - ret = H5Adelete(dataset, ATTR2_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 2, "H5Oget_info3"); - - /* Open 1st attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR1_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR1_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR1_NAME); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open last (formally 3rd) attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)1, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR3_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR3_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Delete first attribute */ - ret = H5Adelete(dataset, ATTR1_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 1, "H5Oget_info3"); - - /* Open last (formally 3rd) attribute for the dataset */ - attr = - H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Name */ - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name); - VERIFY(name_len, strlen(ATTR3_NAME), "H5Aget_name"); - if (strcmp(attr_name, ATTR3_NAME) != 0) - TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, 
ATTR3_NAME); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Delete first attribute */ - ret = H5Adelete(dataset, ATTR3_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Verify the correct number of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, 0, "H5Oget_info3"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_delete() */ - -/**************************************************************** -** -** test_attr_dtype_shared(): Test H5A (attribute) code for using -** shared datatypes in attributes. -** -****************************************************************/ -static void -test_attr_dtype_shared(hid_t fapl) -{ - hid_t file_id; /* File ID */ - hid_t dset_id; /* Dataset ID */ - hid_t space_id; /* Dataspace ID for dataset & attribute */ - hid_t type_id; /* Datatype ID for named datatype */ - hid_t attr_id; /* Attribute ID */ - int data = 8; /* Data to write */ - int rdata = 0; /* Read read in */ - H5O_info2_t oinfo; /* Object's information */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Shared Datatypes with Attributes\n")); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { - /* Create a file */ - file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file_id, FAIL, "H5Fopen"); - - /* Close file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - - /* Re-open file */ - file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(file_id, FAIL, "H5Fopen"); - - /* Create a datatype to commit and use */ - type_id = H5Tcopy(H5T_NATIVE_INT); - CHECK(type_id, FAIL, "H5Tcopy"); - - /* Commit datatype to file */ - ret = H5Tcommit2(file_id, TYPE1_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "H5Oget_info_by_name3"); - - /* Create dataspace for dataset */ - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - - /* Create dataset */ - dset_id = H5Dcreate2(file_id, DSET1_NAME, type_id, space_id, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "H5Oget_info_by_name3"); - - /* Create attribute on dataset */ - attr_id = H5Acreate2(dset_id, ATTR1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3"); - - /* Close attribute */ - ret = 
H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Delete attribute */ - ret = H5Adelete(dset_id, ATTR1_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "H5Oget_info_by_name3"); - - /* Create attribute on dataset */ - attr_id = H5Acreate2(dset_id, ATTR1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3"); - - /* Write data into the attribute */ - ret = H5Awrite(attr_id, H5T_NATIVE_INT, &data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(file_id, FAIL, "H5Fopen"); - - /* Open dataset */ - dset_id = H5Dopen2(file_id, DSET1_NAME, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Open attribute */ - attr_id = H5Aopen(dset_id, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Aopen"); - - /* Read data from the attribute */ - ret = H5Aread(attr_id, H5T_NATIVE_INT, &rdata); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(data, rdata, "H5Aread"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3"); - - /* Unlink the dataset */ - ret = H5Ldelete(file_id, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Check reference count on named datatype */ - ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "H5Oget_info_by_name3"); - - /* Unlink the named datatype */ - ret = H5Ldelete(file_id, TYPE1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); -#endif - } -} /* test_attr_dtype_shared() */ - -/**************************************************************** -** -** test_attr_duplicate_ids(): Test operations with more than -** one ID handles. 
-** -****************************************************************/ -static void -test_attr_duplicate_ids(hid_t fapl) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t gid1, gid2; /* Group ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t attr, attr2; /* Attribute ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {ATTR1_DIM1}; - int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ - int rewrite_data[ATTR1_DIM1] = {1234, -423, 9907256}; /* Test data for rewrite */ - int i; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing operations with two ID handles\n")); - - /*----------------------------------------------------------------------------------- - * Create an attribute in a new file and fill it with fill value. - */ - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Create dataspace for attribute */ - sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Try to create an attribute on the dataset */ - attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Open the attribute just created and get a second ID */ - attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr2, FAIL, "H5Aopen"); - - /* Close attribute */ - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Reopen the file and verify the fill value for attribute. Also write - * some real data. 
- */ - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Open first attribute for the dataset */ - attr = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute with fill value */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (0 != read_data1[i]) - TestErrPrintf("%d: attribute data different: read_data1[%d]=%d\n", __LINE__, i, read_data1[i]); - - /* Open attribute for the second time */ - attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information */ - ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Reopen the file and verify the data. Also rewrite the data and verify it. - */ - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Open first attribute for the dataset */ - attr = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Open attribute for the second time */ - attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information */ - ret = H5Awrite(attr2, H5T_NATIVE_INT, rewrite_data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (read_data1[i] != rewrite_data[i]) - TestErrPrintf("%d: attribute data different: read_data1[%d]=%d, rewrite_data[%d]=%d\n", __LINE__, - i, read_data1[i], i, rewrite_data[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Verify that the attribute being pointed to by different paths shares - * the same data. 
- */ - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Create a group */ - gid1 = H5Gcreate2(fid1, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - - /* Create hard link to the first group */ - ret = H5Lcreate_hard(gid1, GROUP1_NAME, H5L_SAME_LOC, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - - /* Try to create an attribute on the group */ - attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Open the hard link just created */ - gid2 = H5Gopen2(fid1, GROUP2_NAME, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - /* Open the attribute of the group for the second time */ - attr2 = H5Aopen(gid2, ATTR2_NAME, H5P_DEFAULT); - CHECK(attr2, FAIL, "H5Aopen"); - - /* Write attribute information with the first attribute handle */ - ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information with the second attribute handle */ - ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close group */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close Attribute dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_duplicate_ids() */ - -/**************************************************************** -** -** test_attr_dense_verify(): Test basic H5A (attribute) code. 
-** Verify attributes on object -** -****************************************************************/ -static int -test_attr_dense_verify(hid_t loc_id, unsigned max_attr) -{ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - hid_t attr; /* Attribute ID */ - unsigned value; /* Attribute value */ - unsigned u; /* Local index variable */ - int old_nerrs; /* Number of errors when entering this check */ - herr_t ret; /* Generic return value */ - - /* Retrieve the current # of reported errors */ - old_nerrs = nerrors; - - /* Re-open all the attributes by name and verify the data */ - for (u = 0; u < max_attr; u++) { - /* Open attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Aopen(loc_id, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read data from the attribute */ - ret = H5Aread(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(value, u, "H5Aread"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Re-open all the attributes by index and verify the data */ - for (u = 0; u < max_attr; u++) { - ssize_t name_len; /* Length of attribute name */ - char check_name[ATTR_NAME_LEN]; /* Buffer for checking attribute names */ - - /* Open attribute */ - attr = H5Aopen_by_idx(loc_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen_by_idx"); - - /* Verify Name */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, check_name); - VERIFY(name_len, strlen(attrname), "H5Aget_name"); - if (strcmp(check_name, attrname) != 0) - TestErrPrintf("attribute name different: attrname = '%s', should be '%s'\n", check_name, - attrname); - - /* Read data from the attribute */ - ret = H5Aread(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(value, u, "H5Aread"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Retrieve current # of errors */ - if (old_nerrs == nerrors) - return (0); - else - return (-1); -} /* test_attr_dense_verify() */ - -/**************************************************************** -** -** test_attr_dense_create(): Test basic H5A (attribute) code. -** Tests "dense" attribute storage creation -** -****************************************************************/ -static void -test_attr_dense_create(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? 
*/ -#endif - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dense Attribute Storage Creation\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes, until just before converting to dense storage */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add one more attribute, to push into "dense" storage */ - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) { - /* Attempt to add attribute again, which should fail */ - H5E_BEGIN_TRY - { - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(attr, FAIL, "H5Acreate2"); - } - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset with attributes */ - 
ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_dense_create() */ - -/**************************************************************** -** -** test_attr_dense_open(): Test basic H5A (attribute) code. -** Tests opening attributes in "dense" storage -** -****************************************************************/ -static void -test_attr_dense_open(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ -#endif - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Opening Attributes in Dense Storage\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Enable creation order tracking on attributes, so creation order tests work */ - ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes, until just before converting to dense storage */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = 
H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify attributes written so far */ - ret = test_attr_dense_verify(dataset, u); - CHECK(ret, FAIL, "test_attr_dense_verify"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add one more attribute, to push into "dense" storage */ - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Verify all the attributes written */ - ret = test_attr_dense_verify(dataset, (u + 1)); - CHECK(ret, FAIL, "test_attr_dense_verify"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_dense_open() */ - -/**************************************************************** -** -** test_attr_dense_delete(): Test basic H5A (attribute) code. -** Tests deleting attributes in "dense" storage -** -****************************************************************/ -static void -test_attr_dense_delete(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ -#endif - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - H5O_info2_t oinfo; /* Object info */ - int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); - herr_t ret; /* Generic return value */ - - /* Only run this test for sec2/default driver */ - if (!h5_using_default_driver(NULL)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deleting Attributes in Dense Storage\n")); - - if (use_min_dset_oh) { /* using minimized dataset headers */ - /* modify fcpl... 
- * sidestep "bug" where file space is lost with minimized dset ohdrs - */ - fcpl = H5Pcopy(fcpl); - CHECK(fcpl, FAIL, "H5Pcopy"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, true, 1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - } - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - if (use_min_dset_oh) - CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (use_min_dset_oh) { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - else { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - - /* Enable creation order tracking on attributes, so creation order tests work */ - ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes, until well into dense storage */ - for (u = 0; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Check # of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Delete attributes until the attributes revert to compact storage again */ - for (u--; u >= min_dense; u--) { - /* Delete attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); - - /* Verify attributes still left */ - ret = 
test_attr_dense_verify(dataset, u); - CHECK(ret, FAIL, "test_attr_dense_verify"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Delete one more attribute, which should cause reversion to compact storage */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Verify attributes still left */ - ret = test_attr_dense_verify(dataset, (u - 1)); - CHECK(ret, FAIL, "test_attr_dense_verify"); - - /* Delete another attribute, to verify deletion in compact storage */ - snprintf(attrname, sizeof(attrname), "attr %02u", (u - 1)); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Verify attributes still left */ - ret = test_attr_dense_verify(dataset, (u - 2)); - CHECK(ret, FAIL, "test_attr_dense_verify"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_dense_delete() */ - -/**************************************************************** -** -** test_attr_dense_rename(): Test basic H5A (attribute) code. -** Tests renaming attributes in "dense" storage -** -****************************************************************/ -static void -test_attr_dense_rename(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - char new_attrname[NAME_BUF_SIZE]; /* New name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - H5O_info2_t oinfo; /* Object info */ - unsigned u; /* Local index variable */ - int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); - unsigned use_corder; /* Track creation order or not */ - herr_t ret; /* Generic return value */ - - /* Only run this test for sec2/default driver */ - if (!h5_using_default_driver(NULL)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Renaming Attributes in Dense Storage\n")); - - if (use_min_dset_oh) { /* using minimized dataset headers */ - /* modify fcpl... 
- * sidestep "bug" where file space is lost with minimized dset ohdrs - */ - fcpl = H5Pcopy(fcpl); - CHECK(fcpl, FAIL, "H5Pcopy"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, true, 1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - } - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - if (use_min_dset_oh) - CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (use_min_dset_oh) { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcopy"); - } - else { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); - } - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Using creation order or not */ - for (use_corder = false; use_corder <= true; use_corder++) { - - if (use_corder) { - ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes, until well into dense storage */ - for (u = 0; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Rename attribute */ - snprintf(new_attrname, sizeof(new_attrname), "new attr %02u", u); - - /* Rename attribute */ - ret = H5Arename_by_name(fid, DSET1_NAME, attrname, new_attrname, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Arename_by_name"); - - /* Check # of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - if (!use_corder) { - /* Unlink dataset with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } - - } /* end for use_corder */ - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, 
FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fopen"); - - /* Open dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - /* Verify renamed attributes */ - for (u = 0; u < (max_compact * 2); u++) { - unsigned value; /* Attribute value */ - - /* Open attribute */ - snprintf(attrname, sizeof(attrname), "new attr %02u", u); - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Aopen"); - - /* Read data from the attribute */ - ret = H5Aread(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(value, u, "H5Aread"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_dense_rename() */ - -/**************************************************************** -** -** test_attr_dense_unlink(): Test basic H5A (attribute) code. -** Tests unlinking object with attributes in "dense" storage -** -****************************************************************/ -static void -test_attr_dense_unlink(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ - size_t mesg_count; /* # of shared messages */ - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - H5O_info2_t oinfo; /* Object info */ - unsigned u; /* Local index variable */ - int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); - herr_t ret; /* Generic return value */ - - /* Only run this test for sec2/default driver */ - if (!h5_using_default_driver(NULL)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Unlinking Object with Attributes in Dense Storage\n")); - - if (use_min_dset_oh) { /* using minimized dataset headers */ - /* modify fcpl... 
- * sidestep "bug" where file space is lost with minimized dset ohdrs - */ - fcpl = H5Pcopy(fcpl); - CHECK(fcpl, FAIL, "H5Pcopy"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, true, 1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - } - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - if (use_min_dset_oh) - CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (use_min_dset_oh) { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - else { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes, until well into dense storage */ - for (u = 0; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Check # of attributes */ - ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info3"); - VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Unlink dataset */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); -#if 0 - /* Check on dataset's attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); -#endif - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_dense_unlink() */ - 
-/**************************************************************** -** -** test_attr_dense_limits(): Test basic H5A (attribute) code. -** Tests attribute in "dense" storage limits -** -****************************************************************/ -static void -test_attr_dense_limits(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact, rmax_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense, rmin_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ -#endif - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Phase Change Limits For Attributes in Dense Storage\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Change limits on compact/dense attribute storage */ - max_compact = 0; - min_dense = 0; - ret = H5Pset_attr_phase_change(dcpl, max_compact, min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &rmax_compact, &rmin_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(rmax_compact, max_compact, "H5Pget_attr_phase_change"); - VERIFY(rmin_dense, min_dense, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Add first attribute, which should be immediately in dense storage */ - - /* Create attribute */ - u = 0; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Add 
second attribute, to allow deletions to be checked easily */ - - /* Create attribute */ - u = 1; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Delete second attribute, attributes should still be stored densely */ - - /* Delete attribute */ - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Delete first attribute, attributes should not be stored densely */ - - /* Delete attribute */ - u = 0; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_dense_limits() */ - -/**************************************************************** -** -** test_attr_dense_dup_ids(): Test operations with multiple ID -** handles with "dense" attribute storage creation -** -****************************************************************/ -static void -test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t gid1, gid2; /* Group ID */ - hid_t sid, sid2; /* Dataspace ID */ - hid_t attr, attr2, add_attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - hsize_t dims[] = {ATTR1_DIM1}; - int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading attribute */ - int rewrite_data[ATTR1_DIM1] = {1234, -423, 9907256}; /* Test data for rewrite */ - unsigned scalar_data = 1317; /* scalar data for attribute */ - unsigned read_scalar; /* variable for reading attribute*/ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ -#endif - unsigned u, i; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing operations with two IDs for Dense Storage\n")); - - /*----------------------------------------------------------------------------------- - * Create an attribute in dense storage and fill it with fill value. 
- */ - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* need DCPL to query the group creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes, until just before converting to dense storage */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add one more attribute, to push into "dense" storage */ - /* Create dataspace for attribute */ - sid2 = H5Screate_simple(ATTR1_RANK, dims, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open the attribute just created and get a second ID */ - attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr2, FAIL, "H5Aopen"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Reopen the file and verify the fill value for attribute. Also write - * some real data. 
- */ - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open first attribute for the dataset */ - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute with fill value */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (0 != read_data1[i]) - TestErrPrintf("%d: attribute data different: read_data1[%d]=%d\n", __LINE__, i, read_data1[i]); - - /* Open attribute for the second time */ - attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information */ - ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Reopen the file and verify the data. Also rewrite the data and verify it. - */ - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open first attribute for the dataset */ - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Read attribute information */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Open attribute for the second time */ - attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information with the second ID */ - ret = H5Awrite(attr2, H5T_NATIVE_INT, rewrite_data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information with the first ID */ - ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (read_data1[i] != rewrite_data[i]) - TestErrPrintf("%d: attribute data different: read_data1[%d]=%d, rewrite_data[%d]=%d\n", __LINE__, - i, read_data1[i], i, rewrite_data[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Open the attribute by index. 
Verify the data is shared when the attribute - * is opened twice. - */ - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open first attribute for the dataset */ - attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Open attribute for the second time */ - attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information with the second ID */ - ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information with the first ID */ - ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - if (read_scalar != scalar_data) - TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar, - scalar_data); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Open one attribute. As it remains open, delete some attributes. The - * attribute storage should switch from dense to compact. Then open the - * same attribute for the second time and verify that the attribute data - * is shared. 
- */ - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open attribute of the dataset for the first time */ - attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Delete a few attributes until the storage switches to compact */ - for (u = max_compact; u >= min_dense - 1; u--) { - ret = H5Adelete_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); - } -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Open attribute for the second time */ - attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information with the second ID */ - ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information with the first ID */ - ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - if (read_scalar != scalar_data) - TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar, - scalar_data); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Open one attribute. As it remains open, create some attributes. The - * attribute storage should switch from compact to dense. Then open the - * same attribute for the second time and verify that the attribute data - * is shared. 
- */ - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Open attribute of the dataset for the first time */ - attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Delete a few attributes until the storage switches to compact */ - for (u = min_dense - 1; u <= max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - add_attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(add_attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(add_attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(add_attr); - CHECK(ret, FAIL, "H5Aclose"); - } -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open attribute for the second time */ - attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Write attribute information with the second ID */ - ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information with the first ID */ - ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - if (read_scalar != scalar_data) - TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar, - scalar_data); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*----------------------------------------------------------------------------------- - * Verify that the attribute being pointed to by different paths shares - * the same data. 
- */ - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create a group */ - gid1 = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - - /* Create hard link to the first group */ - ret = H5Lcreate_hard(gid1, GROUP1_NAME, H5L_SAME_LOC, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - - /* Add attributes, until just before converting to dense storage */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(gid1, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Try to create another attribute to make dense storage */ - attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check on group's attribute storage status */ - is_dense = H5O__is_attr_dense_test(gid1); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Open the hard link just created */ - gid2 = H5Gopen2(fid, GROUP2_NAME, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - /* Open the attribute of the group for the second time */ - attr2 = H5Aopen(gid2, ATTR2_NAME, H5P_DEFAULT); - CHECK(attr2, FAIL, "H5Aopen"); - - /* Write attribute information with the first attribute handle */ - ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1); - CHECK(ret, FAIL, "H5Awrite"); - - /* Read attribute information with the second attribute handle */ - ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1); - CHECK(ret, FAIL, "H5Aread"); - - /* Verify values read in */ - for (i = 0; i < ATTR1_DIM1; i++) - if (attr_data1[i] != read_data1[i]) - TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i, - attr_data1[i], i, read_data1[i]); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(attr2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close group */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close Attribute dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_dense_dup_ids() */ - -/**************************************************************** -** -** test_attr_big(): Test basic H5A (attribute) code. 
-** Tests storing "big" attribute in dense storage immediately, if available -** -****************************************************************/ -static void -test_attr_big(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t big_sid; /* "Big" dataspace ID */ - hsize_t dims[ATTR6_RANK] = {ATTR6_DIM1, ATTR6_DIM2, ATTR6_DIM3}; /* Attribute dimensions */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ - unsigned nshared_indices; /* # of shared message indices */ - H5F_libver_t low, high; /* File format bounds */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? */ -#endif - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Storing 'Big' Attributes in Dense Storage\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset & "small" attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create "big" dataspace for "big" attributes */ - big_sid = H5Screate_simple(ATTR6_RANK, dims, NULL); - CHECK(big_sid, FAIL, "H5Screate_simple"); - - /* need DCPL to query the group creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Retrieve # of shared message indices (ie. 
whether attributes are shared or not) */ - ret = H5Pget_shared_mesg_nindexes(fcpl, &nshared_indices); - CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes"); - - /* Retrieve the format bounds for creating objects in the file */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Add first "small" attribute, which should be in compact storage */ - - /* Create attribute */ - u = 0; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Add second "small" attribute, which should stay in compact storage */ - - /* Create attribute */ - u = 1; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Add first "big" attribute, which should push storage into dense form */ - - /* Create attribute */ - u = 2; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); - if (low == H5F_LIBVER_LATEST || attr >= 0) { - CHECK(attr, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Check on dataset's attribute storage status */ - /* (when attributes are shared, the "big" attribute goes into the shared - * message heap instead of forcing the attribute storage into the dense - * form - QAK) - */ -#if 0 - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? 
false : true), "H5O__is_attr_dense_test"); -#endif - - /* Add second "big" attribute, which should leave storage in dense form */ - - /* Create attribute */ - u = 3; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Check on dataset's attribute storage status */ - /* (when attributes are shared, the "big" attribute goes into the shared - * message heap instead of forcing the attribute storage into the dense - * form - QAK) - */ -#if 0 - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); -#endif - - /* Delete second "small" attribute, attributes should still be stored densely */ - - /* Delete attribute */ - u = 1; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); -#endif - - /* Delete second "big" attribute, attributes should still be stored densely */ - - /* Delete attribute */ - u = 3; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? 
false : true), "H5O__is_attr_dense_test"); -#endif - - /* Delete first "big" attribute, attributes should _not_ be stored densely */ - - /* Delete attribute */ - u = 2; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Delete first "small" attribute, should be no attributes now */ - - /* Delete attribute */ - u = 0; - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); -#endif - } /* end if */ -#if 0 - else { - /* Shouldn't be able to create "big" attributes with older version of format */ - VERIFY(attr, FAIL, "H5Acreate2"); - - /* Check on dataset's attribute storage status */ - /* (when attributes are shared, the "big" attribute goes into the shared - * message heap instead of forcing the attribute storage into the dense - * form - QAK) - */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - } /* end else */ -#endif - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(big_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_big() */ - -/**************************************************************** -** -** test_attr_null_space(): Test basic H5A (attribute) code. 
-** Tests storing attribute with "null" dataspace -** -****************************************************************/ -static void -test_attr_null_space(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t null_sid; /* "null" dataspace ID */ - hid_t attr_sid; /* Attribute's dataspace ID */ - hid_t attr; /* Attribute ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned value; /* Attribute value */ - htri_t cmp; /* Results of comparison */ -#if 0 - hsize_t storage_size; /* Size of storage for attribute */ -#endif - H5A_info_t ainfo; /* Attribute info */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Storing Attributes with 'null' dataspace\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create dataspace for dataset attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create "null" dataspace for attribute */ - null_sid = H5Screate(H5S_NULL); - CHECK(null_sid, FAIL, "H5Screate"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Add attribute with 'null' dataspace */ - - /* Create attribute */ - strcpy(attrname, "null attr"); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Try to read data from the attribute */ - /* (shouldn't fail, but should leave buffer alone) */ - value = 23; - ret = H5Aread(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(value, 23, "H5Aread"); - - /* Get the dataspace for the attribute and make certain it's 'null' */ - attr_sid = H5Aget_space(attr); - CHECK(attr_sid, FAIL, "H5Aget_space"); - - /* Compare the dataspaces */ - cmp = H5Sextent_equal(attr_sid, null_sid); - CHECK(cmp, FAIL, "H5Sextent_equal"); - VERIFY(cmp, true, "H5Sextent_equal"); - - /* Close dataspace */ - ret = H5Sclose(attr_sid); - CHECK(ret, FAIL, "H5Sclose"); -#if 0 - /* Check the storage size for the attribute */ - storage_size = H5Aget_storage_size(attr); - VERIFY(storage_size, 0, "H5Aget_storage_size"); -#endif - /* Get the attribute info */ - ret = H5Aget_info(attr, &ainfo); - CHECK(ret, FAIL, "H5Aget_info"); -#if 0 - VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Add another attribute with 'null' dataspace */ - - /* Create attribute */ - strcpy(attrname, "null attr #2"); - attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Try to write data to the attribute */ - /* (shouldn't fail, but should leave buffer alone) */ - value = 23; - ret = H5Awrite(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Awrite"); - VERIFY(value, 23, 
"H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file and check on the attributes */ - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Open first attribute */ - strcpy(attrname, "null attr #2"); - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Try to read data from the attribute */ - /* (shouldn't fail, but should leave buffer alone) */ - value = 23; - ret = H5Aread(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(value, 23, "H5Aread"); - - /* Get the dataspace for the attribute and make certain it's 'null' */ - attr_sid = H5Aget_space(attr); - CHECK(attr_sid, FAIL, "H5Aget_space"); - - /* Compare the dataspaces */ - cmp = H5Sextent_equal(attr_sid, null_sid); - CHECK(cmp, FAIL, "H5Sextent_equal"); - VERIFY(cmp, true, "H5Sextent_equal"); - - /* Close dataspace */ - ret = H5Sclose(attr_sid); - CHECK(ret, FAIL, "H5Sclose"); -#if 0 - /* Check the storage size for the attribute */ - storage_size = H5Aget_storage_size(attr); - VERIFY(storage_size, 0, "H5Aget_storage_size"); -#endif - /* Get the attribute info */ - ret = H5Aget_info(attr, &ainfo); - CHECK(ret, FAIL, "H5Aget_info"); -#if 0 - VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); -#endif - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open second attribute */ - strcpy(attrname, "null attr"); - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Try to write data to the attribute */ - /* (shouldn't fail, but should leave buffer alone) */ - value = 23; - ret = H5Awrite(attr, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Awrite"); - VERIFY(value, 23, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink dataset */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(null_sid); - CHECK(ret, FAIL, "H5Sclose"); - -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif -} /* test_attr_null_space() */ - -/**************************************************************** -** -** test_attr_deprec(): Test basic H5A (attribute) code. 
-** Tests deprecated API routines -** -****************************************************************/ -static void -test_attr_deprec(hid_t fcpl, hid_t fapl) -{ -#ifndef H5_NO_DEPRECATED_SYMBOLS - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deprecated Attribute Routines\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Add attribute to dataset */ - - /* Create attribute */ - attr = H5Acreate1(dataset, "attr", H5T_NATIVE_UINT, sid, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate1"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataspaces */ - ret = H5Sclose(sid); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file and operate on the attribute */ - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open dataset */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - -#if 0 - /* Get number of attributes with bad ID */ - H5E_BEGIN_TRY - { - ret = H5Aget_num_attrs((hid_t)-1); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_num_attrs"); - - /* Get number of attributes */ - ret = H5Aget_num_attrs(dataset); - VERIFY(ret, 1, "H5Aget_num_attrs"); -#endif - /* Open the attribute by index */ - attr = H5Aopen_idx(dataset, 0); - CHECK(attr, FAIL, "H5Aopen_idx"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open the attribute by name */ - attr = H5Aopen_name(dataset, "attr"); - CHECK(attr, FAIL, "H5Aopen_name"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#else /* H5_NO_DEPRECATED_SYMBOLS */ - /* Shut compiler up */ - (void)fcpl; - (void)fapl; - - /* Output message about test being skipped */ - MESSAGE(5, ("Skipping Test On Deprecated Attribute Routines\n")); - -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -} /* test_attr_deprec() */ - -/**************************************************************** -** -** test_attr_many(): Test basic H5A (attribute) code. -** Tests storing lots of attributes -** -****************************************************************/ -static void -test_attr_many(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t gid; /* Group ID */ - hid_t sid; /* Dataspace ID */ - hid_t aid; /* Attribute ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned nattr = (new_format ? 
NATTR_MANY_NEW : NATTR_MANY_OLD); /* Number of attributes */ - htri_t exists; /* Whether the attribute exists or not */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Storing Many Attributes\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create group for attributes */ - gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create many attributes */ - for (u = 0; u < nattr; u++) { - snprintf(attrname, sizeof(attrname), "a-%06u", u); - - exists = H5Aexists(gid, attrname); - VERIFY(exists, false, "H5Aexists"); - - exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); - VERIFY(exists, false, "H5Aexists_by_name"); - - aid = H5Acreate2(gid, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - exists = H5Aexists(gid, attrname); - VERIFY(exists, true, "H5Aexists"); - - exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); - VERIFY(exists, true, "H5Aexists_by_name"); - - ret = H5Awrite(aid, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - exists = H5Aexists(gid, attrname); - VERIFY(exists, true, "H5Aexists"); - - exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); - VERIFY(exists, true, "H5Aexists_by_name"); - } /* end for */ - - /* Close group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file and check on the attributes */ - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open group */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Verify attributes */ - for (u = 0; u < nattr; u++) { - unsigned value; /* Attribute value */ - - snprintf(attrname, sizeof(attrname), "a-%06u", u); - - exists = H5Aexists(gid, attrname); - VERIFY(exists, true, "H5Aexists"); - - exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); - VERIFY(exists, true, "H5Aexists_by_name"); - - aid = H5Aopen(gid, attrname, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Aopen"); - - exists = H5Aexists(gid, attrname); - VERIFY(exists, true, "H5Aexists"); - - exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT); - VERIFY(exists, true, "H5Aexists_by_name"); - - ret = H5Aread(aid, H5T_NATIVE_UINT, &value); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(value, u, "H5Aread"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Close group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_many() */ - -/**************************************************************** -** -** test_attr_corder_create_empty(): Test basic H5A (attribute) code. 
-** Tests basic code to create objects with attribute creation order info -** -****************************************************************/ -static void -test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned crt_order_flags; /* Creation order flags */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Code for Attributes with Creation Order Info\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } -#if 0 - /* Get creation order indexing on object */ - ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); - CHECK(ret, FAIL, "H5Pget_attr_creation_order"); - VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order"); -#endif - /* Setting invalid combination of a attribute order creation order indexing on should fail */ - H5E_BEGIN_TRY - { - ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_INDEXED); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_attr_creation_order"); - -#if 0 - ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); - CHECK(ret, FAIL, "H5Pget_attr_creation_order"); - VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order"); -#endif - - /* Set attribute creation order tracking & indexing for object */ - ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); - CHECK(ret, FAIL, "H5Pget_attr_creation_order"); - VERIFY(crt_order_flags, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "H5Pget_attr_creation_order"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open dataset created */ - dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - /* Retrieve dataset creation property list for group 
*/ - dcpl = H5Dget_create_plist(dataset); - CHECK(dcpl, FAIL, "H5Dget_create_plist"); - - /* Query the attribute creation properties */ - ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags); - CHECK(ret, FAIL, "H5Pget_attr_creation_order"); - VERIFY(crt_order_flags, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "H5Pget_attr_creation_order"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_corder_create_basic() */ - -/**************************************************************** -** -** test_attr_corder_create_compact(): Test basic H5A (attribute) code. -** Tests compact attribute storage on objects with attribute creation order info -** -****************************************************************/ -static void -test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? */ - hsize_t nattrs; /* Number of attributes on object */ -#endif - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Compact Storage of Attributes with Creation Order Info\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Set attribute creation order tracking & indexing for object */ - ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = 
H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Create several attributes, but keep storage in compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open datasets created */ - dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dopen2"); - dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dopen2"); - dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dopen2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Loop through attributes, checking their creation order values */ - /* (the name index is used, but the creation order value is in the same order) */ - for (u = 0; u < max_compact; u++) { - H5A_info_t ainfo; /* Attribute information */ - - /* Retrieve information for attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - - /* Verify creation order of attribute */ - VERIFY(ainfo.corder_valid, true, "H5Aget_info_by_name"); - VERIFY(ainfo.corder, u, "H5Aget_info_by_name"); - } /* end for */ - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, 
"H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_corder_create_compact() */ - -/**************************************************************** -** -** test_attr_corder_create_dense(): Test basic H5A (attribute) code. -** Tests dense attribute storage on objects with attribute creation order info -** -****************************************************************/ -static void -test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? */ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dense Storage of Attributes with Creation Order Info\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Set attribute creation order tracking & indexing for object */ - ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Create several attributes, but keep storage in compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = 
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - - /* Create another attribute, to push into dense storage */ - snprintf(attrname, sizeof(attrname), "attr %02u", max_compact); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open datasets created */ - dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dopen2"); - dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dopen2"); - dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dopen2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Loop through attributes, checking their creation order values */ - /* (the name index is used, but the 
creation order value is in the same order) */ - for (u = 0; u < (max_compact + 1); u++) { - H5A_info_t ainfo; /* Attribute information */ - - /* Retrieve information for attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - - /* Verify creation order of attribute */ - VERIFY(ainfo.corder_valid, true, "H5Aget_info_by_name"); - VERIFY(ainfo.corder, u, "H5Aget_info_by_name"); - } /* end for */ - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_corder_create_dense() */ - -/**************************************************************** -** -** test_attr_corder_create_reopen(): Test basic H5A (attribute) code. -** Test creating attributes w/reopening file from using new format -** to using old format -** -****************************************************************/ -static void -test_attr_corder_create_reopen(hid_t fcpl, hid_t fapl) -{ - hid_t fid = -1; /* File ID */ - hid_t gcpl_id = -1; /* Group creation property list ID */ - hid_t gid = -1; /* Group ID */ - hid_t sid = -1; /* Dataspace ID */ - hid_t aid = -1; /* Attribute ID */ - int buf; /* Attribute data */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Creating Attributes w/New & Old Format\n")); - - /* Create dataspace for attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create group */ - gcpl_id = H5Pcreate(H5P_GROUP_CREATE); - CHECK(gcpl_id, FAIL, "H5Pcreate"); - ret = H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl_id, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create a couple of attributes */ - aid = H5Acreate2(gid, "attr-003", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - buf = 3; - ret = H5Awrite(aid, H5T_NATIVE_INT, &buf); - CHECK(ret, FAIL, "H5Awrite"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - aid = H5Acreate2(gid, "attr-004", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - buf = 4; - ret = H5Awrite(aid, H5T_NATIVE_INT, &buf); - CHECK(ret, FAIL, "H5Awrite"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /***** Close group & GCPL *****/ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Pclose(gcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file, without "use the latest format" flag */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open group */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Delete attribute */ - ret = H5Adelete(gid, "attr-003"); - CHECK(aid, FAIL, "H5Adelete"); - - /* Create some additional attributes */ - aid = H5Acreate2(gid, "attr-008", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - buf = 8; - ret = H5Awrite(aid, H5T_NATIVE_INT, 
&buf); - CHECK(ret, FAIL, "H5Awrite"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - aid = H5Acreate2(gid, "attr-006", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - buf = 6; - ret = H5Awrite(aid, H5T_NATIVE_INT, &buf); - CHECK(ret, FAIL, "H5Awrite"); - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /***** Close group *****/ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close attribute dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_corder_create_reopen() */ - -/**************************************************************** -** -** test_attr_corder_transition(): Test basic H5A (attribute) code. -** Tests attribute storage transitions on objects with attribute creation order info -** -****************************************************************/ -static void -test_attr_corder_transition(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? */ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Storage Transitions of Attributes with Creation Order Info\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Set attribute creation order tracking & indexing for object */ - ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* XXX: Try to find a way to resize dataset's object header so that the object - * header can have one chunk, then retrieve "empty" file size and check - * that size after everything is deleted -QAK - */ - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; 
curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open datasets created */ - dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dopen2"); - dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dopen2"); - dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dopen2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Create several attributes, but keep storage in compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - - /* Create another attribute, to push into dense storage */ - snprintf(attrname, sizeof(attrname), "attr %02u", max_compact); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - 
CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - /* Delete several attributes from object, until attribute storage resumes compact form */ - for (u = max_compact; u >= min_dense; u--) { - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, u, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - } /* end for */ - - /* Delete another attribute, to push attribute storage into compact form */ - snprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1)); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Re-add attributes to get back into dense form */ - for (u = (min_dense - 1); u < (max_compact + 1); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open datasets created */ - dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dopen2"); - dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dopen2"); - dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT); - 
CHECK(dset3, FAIL, "H5Dopen2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - /* Delete several attributes from object, until attribute storage resumes compact form */ - for (u = max_compact; u >= min_dense; u--) { - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, u, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - } /* end for */ - - /* Delete another attribute, to push attribute storage into compact form */ - snprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1)); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Re-add attributes to get back into dense form */ - for (u = (min_dense - 1); u < (max_compact + 1); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - 
/* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - /* Delete all attributes */ - for (u = max_compact; u > 0; u--) { - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); - } /* end for */ - snprintf(attrname, sizeof(attrname), "attr %02u", 0); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_corder_transition() */ - -/**************************************************************** -** -** test_attr_corder_delete(): Test basic H5A (attribute) code. -** Tests deleting object w/dense attribute storage on objects with attribute creation order info -** -****************************************************************/ -static void -test_attr_corder_delete(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? 
*/ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - unsigned reopen_file; /* Whether to re-open the file before deleting group */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ -#ifdef LATER - h5_stat_size_t empty_size; /* Size of empty file */ - h5_stat_size_t file_size; /* Size of file after operating on it */ -#endif /* LATER */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deleting Object w/Dense Attribute Storage and Creation Order Info\n")); - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Set attribute creation order tracking & indexing for object */ - ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - -/* XXX: Try to find a way to resize dataset's object header so that the object - * header can have one chunk, then retrieve "empty" file size and check - * that size after everything is deleted -QAK - */ -#ifdef LATER - /* Create empty file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Get the size of an empty file */ - empty_size = h5_get_file_size(FILENAME); - CHECK(empty_size, FAIL, "h5_get_file_size"); -#endif /* LATER */ - - /* Loop to leave file open when deleting dataset, or to close & re-open file - * before deleting dataset */ - for (reopen_file = false; reopen_file <= true; reopen_file++) { - /* Create test file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Create attributes, until attribute storage is in dense form */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = 
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); -#endif - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Check for deleting datasets without re-opening file */ - if (!reopen_file) { - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSET3_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Check for deleting dataset after re-opening file */ - if (reopen_file) { - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Delete the datasets */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSET3_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end if */ - -#ifdef LATER - /* Get the size of the file now */ - file_size = h5_get_file_size(FILENAME); - CHECK(file_size, FAIL, "h5_get_file_size"); - VERIFY(file_size, empty_size, "h5_get_file_size"); -#endif /* LATER */ - } /* end for */ - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_corder_delete() */ - -/*------------------------------------------------------------------------- - * Function: attr_info_by_idx_check - * - * Purpose: Support routine for attr_info_by_idx, to verify the attribute - * info is correct for a attribute - * - * Note: This routine assumes that the attributes have been added to the - * object in alphabetical order. 
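As a minimal sketch of the lookup this helper performs (for illustration only, not code from the test file; obj_id, n, and the 64-byte name buffer are assumptions), querying the n-th attribute by creation order uses H5Aget_info_by_idx and H5Aget_name_by_idx like this:

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: report the name and creation-order value of the n-th attribute
     * (in increasing creation order) on an object. Assumes the object was
     * created with H5P_CRT_ORDER_TRACKED set, as in these tests. */
    static herr_t
    print_nth_attr_by_corder(hid_t obj_id, hsize_t n)
    {
        H5A_info_t ainfo;
        char       name[64];

        if (H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, &ainfo, H5P_DEFAULT) < 0)
            return -1;
        if (H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, name, sizeof(name),
                               H5P_DEFAULT) < 0)
            return -1;

        printf("attr #%llu: name=\"%s\", corder=%llu\n", (unsigned long long)n, name,
               (unsigned long long)ainfo.corder);
        return 0;
    }

The same calls with H5_INDEX_NAME or H5_ITER_DEC walk the name index or reverse the traversal order, which is what the checks in this helper exercise.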
- * - * Return: Success: 0 - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -static int -attr_info_by_idx_check(hid_t obj_id, const char *attrname, hsize_t n, bool use_index) -{ - char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ - H5A_info_t ainfo; /* Attribute info struct */ - int old_nerrs; /* Number of errors when entering this check */ - herr_t ret; /* Generic return value */ - - /* Retrieve the current # of reported errors */ - old_nerrs = nerrors; - - /* Verify the information for first attribute, in increasing creation order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); - - /* Verify the information for new attribute, in increasing creation order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); - - /* Verify the name for new link, in increasing creation order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, tmpname, - (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_name_by_idx"); - if (strcmp(attrname, tmpname) != 0) - TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); - - /* Don't test "native" order if there is no creation order index, since - * there's not a good way to easily predict the attribute's order in the name - * index. - */ - if (use_index) { - /* Verify the information for first attribute, in native creation order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, - (hsize_t)0, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); - - /* Verify the information for new attribute, in native creation order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, n, &ainfo, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); - - /* Verify the name for new link, in increasing native order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, n, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_name_by_idx"); - if (strcmp(attrname, tmpname) != 0) - TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); - } /* end if */ - - /* Verify the information for first attribute, in decreasing creation order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, n, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); - - /* Verify the information for new attribute, in increasing creation order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); - - /* Verify the name for new link, in increasing creation order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); 
- ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, tmpname, - (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_name_by_idx"); - if (strcmp(attrname, tmpname) != 0) - TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); - - /* Verify the information for first attribute, in increasing name order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); - - /* Verify the information for new attribute, in increasing name order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, n, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); - - /* Verify the name for new link, in increasing name order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, n, tmpname, - (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_name_by_idx"); - if (strcmp(attrname, tmpname) != 0) - TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); - - /* Don't test "native" order queries on link name order, since there's not - * a good way to easily predict the order of the links in the name index. - */ - - /* Verify the information for first attribute, in decreasing name order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, n, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); - - /* Verify the information for new attribute, in increasing name order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, (hsize_t)0, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_idx"); - VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); - - /* Verify the name for new link, in increasing name order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, (hsize_t)0, tmpname, - (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_name_by_idx"); - if (strcmp(attrname, tmpname) != 0) - TestErrPrintf("Line %d: attribute name size wrong!\n", __LINE__); - - /* Retrieve current # of errors */ - if (old_nerrs == nerrors) - return (0); - else - return (-1); -} /* end attr_info_by_idx_check() */ - -/**************************************************************** -** -** test_attr_info_by_idx(): Test basic H5A (attribute) code. -** Tests querying attribute info by index -** -****************************************************************/ -static void -test_attr_info_by_idx(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - H5A_info_t ainfo; /* Attribute information */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? 
*/ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - unsigned use_index; /* Use index on creation order values */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Loop over using index for creation order value */ - for (use_index = false; use_index <= true; use_index++) { - /* Output message about test being performed */ - if (use_index) - MESSAGE(5, ("Testing Querying Attribute Info By Index w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Querying Attribute Info By Index w/o Creation Order Index\n")) - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Set attribute creation order tracking & indexing for object */ - if (new_format == true) { - ret = H5Pset_attr_creation_order( - dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } /* end if */ - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for query on non-existent attribute */ - H5E_BEGIN_TRY - { - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - H5E_BEGIN_TRY - { - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_name_by_idx"); - - /* Create attributes, up to limit of compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - 
/* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for out of bound offset queries */ - H5E_BEGIN_TRY - { - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - H5E_BEGIN_TRY - { - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - H5E_BEGIN_TRY - { - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_name_by_idx"); - - /* Create more attributes, to push into dense form */ - for (; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); -#endif - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ -#endif - /* Check for out of bound offset queries */ - H5E_BEGIN_TRY - { - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - H5E_BEGIN_TRY - { - ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_info_by_idx"); - H5E_BEGIN_TRY - { - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aget_name_by_idx"); - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_info_by_idx() */ - -/*************************************************************** -** -** test_attr_info_null_info_pointer(): A test to ensure that -** passing a NULL attribute info pointer to H5Aget_info -** (_by_name/_by_idx) doesn't cause bad behavior. -** -****************************************************************/ -static void -test_attr_info_null_info_pointer(hid_t fcpl, hid_t fapl) -{ - herr_t err_ret = -1; - hid_t fid; - hid_t attr; - hid_t sid; - - /* Create dataspace for attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create attribute */ - attr = H5Acreate2(fid, GET_INFO_NULL_POINTER_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - H5E_BEGIN_TRY - { - err_ret = H5Aget_info(attr, NULL); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Aget_info"); - - H5E_BEGIN_TRY - { - err_ret = H5Aget_info_by_name(fid, ".", GET_INFO_NULL_POINTER_ATTR_NAME, NULL, H5P_DEFAULT); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Aget_info_by_name"); - - H5E_BEGIN_TRY - { - err_ret = H5Aget_info_by_idx(fid, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Aget_info_by_idx"); - - /* Close dataspace */ - err_ret = H5Sclose(sid); - CHECK(err_ret, FAIL, "H5Sclose"); - - /* Close attribute */ - err_ret = H5Aclose(attr); - CHECK(err_ret, FAIL, "H5Aclose"); - - /* Close file */ - err_ret = H5Fclose(fid); - CHECK(err_ret, FAIL, "H5Fclose"); -} - -/*************************************************************** -** -** test_attr_rename_invalid_name(): A test to ensure that -** passing a NULL or empty attribute name to -** H5Arename(_by_name) doesn't cause bad behavior. 
-** -****************************************************************/ -static void -test_attr_rename_invalid_name(hid_t fcpl, hid_t fapl) -{ - herr_t err_ret = -1; - hid_t fid; - hid_t attr; - hid_t sid; - - /* Create dataspace for attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create attribute */ - attr = H5Acreate2(fid, INVALID_RENAME_TEST_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename(fid, NULL, INVALID_RENAME_TEST_NEW_ATTR_NAME); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename(fid, "", INVALID_RENAME_TEST_NEW_ATTR_NAME); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename(fid, INVALID_RENAME_TEST_ATTR_NAME, NULL); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename(fid, INVALID_RENAME_TEST_ATTR_NAME, ""); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename_by_name(fid, ".", NULL, INVALID_RENAME_TEST_NEW_ATTR_NAME, H5P_DEFAULT); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename_by_name"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename_by_name(fid, ".", "", INVALID_RENAME_TEST_NEW_ATTR_NAME, H5P_DEFAULT); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename_by_name"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename_by_name(fid, ".", INVALID_RENAME_TEST_ATTR_NAME, NULL, H5P_DEFAULT); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename_by_name"); - - H5E_BEGIN_TRY - { - err_ret = H5Arename_by_name(fid, ".", INVALID_RENAME_TEST_ATTR_NAME, "", H5P_DEFAULT); - } - H5E_END_TRY - - CHECK(err_ret, SUCCEED, "H5Arename_by_name"); - - /* Close dataspace */ - err_ret = H5Sclose(sid); - CHECK(err_ret, FAIL, "H5Sclose"); - - /* Close attribute */ - err_ret = H5Aclose(attr); - CHECK(err_ret, FAIL, "H5Aclose"); - - /* Close file */ - err_ret = H5Fclose(fid); - CHECK(err_ret, FAIL, "H5Fclose"); -} - -/*************************************************************** -** -** test_attr_get_name_invalid_buf(): A test to ensure that -** passing a NULL buffer to H5Aget_name(_by_idx) when -** the 'size' parameter is non-zero doesn't cause bad -** behavior. 
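For reference, the usual pattern when the name length is unknown is to ask for the length first; as we understand the API, a NULL buffer is only meaningful together with a size of 0, which is the boundary this test guards. A minimal sketch (get_attr_name is an illustrative helper, not part of the test suite):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Sketch: retrieve an attribute's name when its length is not known in
     * advance. The first call queries the length only; the test below checks
     * that NULL with a non-zero size is rejected instead of dereferenced. */
    static char *
    get_attr_name(hid_t attr_id)
    {
        ssize_t len;
        char   *name;

        if ((len = H5Aget_name(attr_id, 0, NULL)) < 0) /* length query only */
            return NULL;
        if (NULL == (name = malloc((size_t)len + 1)))
            return NULL;
        if (H5Aget_name(attr_id, (size_t)len + 1, name) < 0) {
            free(name);
            return NULL;
        }
        return name; /* caller frees */
    }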
-** -****************************************************************/ -static void -test_attr_get_name_invalid_buf(hid_t fcpl, hid_t fapl) -{ - ssize_t err_ret = -1; - hid_t fid; - hid_t attr; - hid_t sid; - - /* Create dataspace for attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create attribute */ - attr = - H5Acreate2(fid, GET_NAME_INVALID_BUF_TEST_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - H5E_BEGIN_TRY - { - err_ret = H5Aget_name(attr, 1, NULL); - } - H5E_END_TRY - - VERIFY(err_ret, FAIL, "H5Aget_name"); - - H5E_BEGIN_TRY - { - err_ret = H5Aget_name_by_idx(fid, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, 1, H5P_DEFAULT); - } - H5E_END_TRY - - VERIFY(err_ret, FAIL, "H5Aget_name_by_idx"); - - /* Close dataspace */ - err_ret = H5Sclose(sid); - CHECK(err_ret, FAIL, "H5Sclose"); - - /* Close attribute */ - err_ret = H5Aclose(attr); - CHECK(err_ret, FAIL, "H5Aclose"); - - /* Close file */ - err_ret = H5Fclose(fid); - CHECK(err_ret, FAIL, "H5Fclose"); -} - -/**************************************************************** -** -** test_attr_delete_by_idx(): Test basic H5A (attribute) code. -** Tests deleting attribute by index -** -****************************************************************/ -static void -test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - H5A_info_t ainfo; /* Attribute information */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? 
*/ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - H5_index_t idx_type; /* Type of index to operate on */ - H5_iter_order_t order; /* Order within in the index */ - unsigned use_index; /* Use index on creation order values */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - MESSAGE(5, ("Testing Deleting Attribute By Index\n")) - - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { - MESSAGE(5, (" SKIPPED\n")) - return; - } - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Loop over operating on different indices on link fields */ - for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) { - /* Loop over operating in different orders */ - for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) { - /* Loop over using index for creation order value */ - for (use_index = false; use_index <= true; use_index++) { - /* Print appropriate test message */ - if (idx_type == H5_INDEX_CRT_ORDER) { - if (order == H5_ITER_INC) { - if (use_index) - MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Increasing " - "Order w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Increasing " - "Order w/o Creation Order Index\n")) - } /* end if */ - else { - if (use_index) - MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Decreasing " - "Order w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Decreasing " - "Order w/o Creation Order Index\n")) - } /* end else */ - } /* end if */ - else { - if (order == H5_ITER_INC) { - if (use_index) - MESSAGE(5, ("Testing Deleting Attribute By Name Index in Increasing Order " - "w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Deleting Attribute By Name Index in Increasing Order w/o " - "Creation Order Index\n")) - } /* end if */ - else { - if (use_index) - MESSAGE(5, ("Testing Deleting Attribute By Name Index in Decreasing Order " - "w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Deleting Attribute By Name Index in Decreasing Order w/o " - "Creation Order Index\n")) - } - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Set attribute creation order tracking & indexing for object */ - if (new_format == true) { - ret = H5Pset_attr_creation_order( - dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? 
H5P_CRT_ORDER_INDEXED : (unsigned)0))); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } /* end if */ - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for deleting non-existent attribute */ - H5E_BEGIN_TRY - { - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Adelete_by_idx"); - - /* Create attributes, up to limit of compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for out of bound deletions */ - H5E_BEGIN_TRY - { - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Adelete_by_idx"); - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Delete attributes from compact storage */ - for (u = 0; u < (max_compact - 1); u++) { - /* Delete first attribute in appropriate order */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); - - /* Verify the attribute information for first attribute in appropriate order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo, - H5P_DEFAULT); - if (new_format) { - if (order == H5_ITER_INC) { - VERIFY(ainfo.corder, (u + 1), "H5Aget_info_by_idx"); - } 
/* end if */ - else { - VERIFY(ainfo.corder, (max_compact - (u + 2)), "H5Aget_info_by_idx"); - } /* end else */ - } /* end if */ - - /* Verify the name for first attribute in appropriate order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - if (order == H5_ITER_INC) - snprintf(attrname, sizeof(attrname), "attr %02u", (u + 1)); - else - snprintf(attrname, sizeof(attrname), "attr %02u", (max_compact - (u + 2))); - ret = strcmp(attrname, tmpname); - VERIFY(ret, 0, "H5Aget_name_by_idx"); - } /* end for */ - - /* Delete last attribute */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); -#if 0 - /* Verify state of attribute storage (empty) */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); -#endif - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Create more attributes, to push into dense form */ - for (u = 0; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - if (u >= max_compact) { - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - } /* end if */ -#endif - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ -#endif - /* Check for out of bound deletion */ - H5E_BEGIN_TRY - { - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Adelete_by_idx"); - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Delete attributes from dense storage */ - for (u = 0; u < ((max_compact * 2) - 1); u++) { - /* Delete first attribute in appropriate order */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); - - /* Verify the attribute information for first attribute in appropriate order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo, - H5P_DEFAULT); - if (new_format) { - if (order == H5_ITER_INC) { - VERIFY(ainfo.corder, (u + 1), "H5Aget_info_by_idx"); - } /* end if */ - else { - VERIFY(ainfo.corder, ((max_compact * 2) - (u + 2)), "H5Aget_info_by_idx"); - } /* end else */ - } /* end if */ - - /* Verify the name for first attribute in appropriate order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - if (order == H5_ITER_INC) - snprintf(attrname, sizeof(attrname), "attr %02u", (u + 1)); - else - snprintf(attrname, sizeof(attrname), "attr %02u", ((max_compact * 2) - (u + 2))); - ret = strcmp(attrname, tmpname); - VERIFY(ret, 0, "H5Aget_name_by_idx"); - } /* end for */ - - /* Delete last attribute */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); -#if 0 - /* Verify state of attribute storage (empty) */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); -#endif - /* Check for deletion on empty attribute storage again */ - H5E_BEGIN_TRY - { - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Adelete_by_idx"); - } /* end for */ - - /* Delete attributes in middle */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Create attributes, to push into dense form */ - for (u = 0; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, 
FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - if (u >= max_compact) { - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - } /* end if */ -#endif - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Delete every other attribute from dense storage, in appropriate order */ - for (u = 0; u < max_compact; u++) { - /* Delete attribute */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); - - /* Verify the attribute information for first attribute in appropriate order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, &ainfo, - H5P_DEFAULT); - if (new_format) { - if (order == H5_ITER_INC) { - VERIFY(ainfo.corder, ((u * 2) + 1), "H5Aget_info_by_idx"); - } /* end if */ - else { - VERIFY(ainfo.corder, ((max_compact * 2) - ((u * 2) + 2)), - "H5Aget_info_by_idx"); - } /* end else */ - } /* end if */ - - /* Verify the name for first attribute in appropriate order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - if (order == H5_ITER_INC) - snprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 1)); - else - snprintf(attrname, sizeof(attrname), "attr %02u", - ((max_compact * 2) - ((u * 2) + 2))); - ret = strcmp(attrname, tmpname); - VERIFY(ret, 0, "H5Aget_name_by_idx"); - } /* end for */ - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Delete remaining attributes from dense storage, in appropriate order */ - for (u = 0; u < (max_compact - 1); u++) { - /* Delete attribute */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); - - /* Verify the attribute information for first attribute in appropriate order */ - memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo, - H5P_DEFAULT); - if (new_format) { - if (order == H5_ITER_INC) { - VERIFY(ainfo.corder, ((u * 2) + 3), "H5Aget_info_by_idx"); - } /* end if */ - else { - VERIFY(ainfo.corder, ((max_compact * 2) - ((u * 2) + 4)), - "H5Aget_info_by_idx"); - } /* end else */ - } /* end if */ - - /* Verify the name for first attribute in appropriate order */ - memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, - tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); - if (order == H5_ITER_INC) - snprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 3)); - else - snprintf(attrname, 
sizeof(attrname), "attr %02u", - ((max_compact * 2) - ((u * 2) + 4))); - ret = strcmp(attrname, tmpname); - VERIFY(ret, 0, "H5Aget_name_by_idx"); - } /* end for */ - - /* Delete last attribute */ - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_idx"); -#if 0 - /* Verify state of attribute storage (empty) */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); -#endif - /* Check for deletion on empty attribute storage again */ - H5E_BEGIN_TRY - { - ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Adelete_by_idx"); - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - } /* end for */ - } /* end for */ - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_delete_by_idx() */ - -/**************************************************************** -** -** attr_iterate2_cb(): Revised attribute operator -** -****************************************************************/ -static herr_t -attr_iterate2_cb(hid_t loc_id, const char *attr_name, const H5A_info_t *info, void *_op_data) -{ - attr_iter_info_t *op_data = (attr_iter_info_t *)_op_data; /* User data */ - char attrname[NAME_BUF_SIZE]; /* Object name */ - H5A_info_t my_info; /* Local attribute info */ - - /* Increment # of times the callback was called */ - op_data->ncalled++; - - /* Get the attribute information directly to compare */ - if (H5Aget_info_by_name(loc_id, ".", attr_name, &my_info, H5P_DEFAULT) < 0) - return (H5_ITER_ERROR); - - /* Check more things for revised attribute iteration (vs. 
older attribute iteration) */ - if (info) { - /* Check for correct order of iteration */ - /* (if we are operating in increasing or decreasing order) */ - if (op_data->order != H5_ITER_NATIVE) - if (info->corder != op_data->curr) - return (H5_ITER_ERROR); - - /* Compare attribute info structs */ - if (info->corder_valid != my_info.corder_valid) - return (H5_ITER_ERROR); - if (info->corder != my_info.corder) - return (H5_ITER_ERROR); - if (info->cset != my_info.cset) - return (H5_ITER_ERROR); - if (info->data_size != my_info.data_size) - return (H5_ITER_ERROR); - } /* end if */ - - /* Verify name of link */ - snprintf(attrname, sizeof(attrname), "attr %02u", (unsigned)my_info.corder); - if (strcmp(attr_name, attrname) != 0) - return (H5_ITER_ERROR); - - /* Check if we've visited this link before */ - if ((size_t)op_data->curr >= op_data->max_visit) - return (H5_ITER_ERROR); - if (op_data->visited[op_data->curr]) - return (H5_ITER_ERROR); - op_data->visited[op_data->curr] = true; - - /* Advance to next value, in correct direction */ - if (op_data->order != H5_ITER_DEC) - op_data->curr++; - else - op_data->curr--; - - /* Check for stopping in the middle of iterating */ - if (op_data->stop > 0) - if (--op_data->stop == 0) - return (CORDER_ITER_STOP); - - return (H5_ITER_CONT); -} /* end attr_iterate2_cb() */ - -#ifndef H5_NO_DEPRECATED_SYMBOLS - -/**************************************************************** -** -** attr_iterate1_cb(): Attribute operator -** -****************************************************************/ -#if 0 -static herr_t -attr_iterate1_cb(hid_t loc_id, const char *attr_name, void *_op_data) -{ - return (attr_iterate2_cb(loc_id, attr_name, NULL, _op_data)); -} /* end attr_iterate1_cb() */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - -/*------------------------------------------------------------------------- - * Function: attr_iterate2_fail_cb - * - * Purpose: Callback routine for iterating over attributes on object that - * always returns failure - * - * Return: Success: 0 - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -static int -attr_iterate2_fail_cb(hid_t H5_ATTR_UNUSED group_id, const char H5_ATTR_UNUSED *attr_name, - const H5A_info_t H5_ATTR_UNUSED *info, void H5_ATTR_UNUSED *_op_data) -{ - return (H5_ITER_ERROR); -} /* end attr_iterate2_fail_cb() */ - -/*------------------------------------------------------------------------- - * Function: attr_iterate_check - * - * Purpose: Check iteration over attributes on an object - * - * Return: Success: 0 - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -static int -attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, - unsigned max_attrs, attr_iter_info_t *iter_info) -{ - unsigned v; /* Local index variable */ - hsize_t skip; /* # of attributes to skip on object */ -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - unsigned oskip; /* # of attributes to skip on object, with H5Aiterate1 */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - int old_nerrs; /* Number of errors when entering this check */ - herr_t ret; /* Generic return value */ - - /* Retrieve the current # of reported errors */ - old_nerrs = nerrors; - - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { - SKIPPED(); - printf(" API functions for iterate aren't " - "supported with this connector\n"); - return 1; - } - - /* Iterate over attributes on object */ - iter_info->nskipped = (unsigned)(skip = 
0); - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate2"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate2"); - for (v = 0; v < max_attrs; v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate2"); - - /* Iterate over attributes on object */ - iter_info->nskipped = (unsigned)(skip = 0); - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate_by_name"); - for (v = 0; v < max_attrs; v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate_by_name"); - - /* Iterate over attributes on object */ - iter_info->nskipped = (unsigned)(skip = 0); - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate_by_name"); - for (v = 0; v < max_attrs; v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate_by_name"); - -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - /* Iterate over attributes on object, with H5Aiterate1 */ - iter_info->nskipped = oskip = 0; - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate1"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate1"); - for (v = 0; v < max_attrs; v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate1"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - - /* Skip over some attributes on object */ - iter_info->nskipped = (unsigned)(skip = max_attrs / 2); - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 
skip : ((max_attrs - 1) - skip); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate2"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate2"); - if (order == H5_ITER_INC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v + (max_attrs / 2)], true, "H5Aiterate2"); - } /* end if */ - else if (order == H5_ITER_DEC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate2"); - } /* end if */ - else { - unsigned nvisit = 0; /* # of links visited */ - - assert(order == H5_ITER_NATIVE); - for (v = 0; v < max_attrs; v++) - if (iter_info->visited[v] == true) - nvisit++; - - VERIFY(skip, (max_attrs / 2), "H5Aiterate2"); - } /* end else */ - - /* Skip over some attributes on object */ - iter_info->nskipped = (unsigned)(skip = max_attrs / 2); - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate_by_name"); - if (order == H5_ITER_INC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v + (max_attrs / 2)], true, "H5Aiterate_by_name"); - } /* end if */ - else if (order == H5_ITER_DEC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate_by_name"); - } /* end if */ - else { - unsigned nvisit = 0; /* # of links visited */ - - assert(order == H5_ITER_NATIVE); - for (v = 0; v < max_attrs; v++) - if (iter_info->visited[v] == true) - nvisit++; - - VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name"); - } /* end else */ - - /* Skip over some attributes on object */ - iter_info->nskipped = (unsigned)(skip = max_attrs / 2); - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate_by_name"); - if (order == H5_ITER_INC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v + (max_attrs / 2)], true, "H5Aiterate_by_name"); - } /* end if */ - else if (order == H5_ITER_DEC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate_by_name"); - } /* end if */ - else { - unsigned nvisit = 0; /* # of links visited */ - - assert(order == H5_ITER_NATIVE); - for (v = 0; v < max_attrs; v++) - if (iter_info->visited[v] == true) - nvisit++; - - VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name"); - } /* end else */ - -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - /* Skip over some attributes on object, with H5Aiterate1 */ - iter_info->nskipped = oskip = max_attrs / 2; - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 
(unsigned)oskip : ((max_attrs - 1) - oskip); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate1"); - - /* Verify that we visited all the links */ - VERIFY(oskip, max_attrs, "H5Aiterate1"); - if (order == H5_ITER_INC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v + (max_attrs / 2)], true, "H5Aiterate1"); - } /* end if */ - else if (order == H5_ITER_DEC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate1"); - } /* end if */ - else { - unsigned nvisit = 0; /* # of links visited */ - - assert(order == H5_ITER_NATIVE); - for (v = 0; v < max_attrs; v++) - if (iter_info->visited[v] == true) - nvisit++; - - VERIFY(skip, (max_attrs / 2), "H5Aiterate1"); - } /* end else */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - - /* Iterate over attributes on object, stopping in the middle */ - iter_info->nskipped = (unsigned)(skip = 0); - iter_info->order = order; - iter_info->stop = 3; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate2"); - VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate2"); - VERIFY(iter_info->ncalled, 3, "H5Aiterate2"); - - /* Iterate over attributes on object, stopping in the middle */ - iter_info->nskipped = (unsigned)(skip = 0); - iter_info->order = order; - iter_info->stop = 3; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate_by_name"); - VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name"); - - /* Iterate over attributes on object, stopping in the middle */ - iter_info->nskipped = (unsigned)(skip = 0); - iter_info->order = order; - iter_info->stop = 3; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate_by_name"); - VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name"); - -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - /* Iterate over attributes on object, stopping in the middle, with H5Aiterate1() */ - iter_info->nskipped = oskip = 0; - iter_info->order = order; - iter_info->stop = 3; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 
0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate1"); - VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate1"); - VERIFY(iter_info->ncalled, 3, "H5Aiterate1"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - - /* Check for iteration routine indicating failure */ - skip = 0; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_fail_cb, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); - - skip = 0; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_fail_cb, NULL, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - skip = 0; - H5E_BEGIN_TRY - { - ret = - H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_fail_cb, NULL, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - /* Retrieve current # of errors */ - if (old_nerrs == nerrors) - return (0); - else - return (-1); -} /* end attr_iterate_check() */ - -/**************************************************************** -** -** test_attr_iterate2(): Test basic H5A (attribute) code. -** Tests iterating over attributes by index -** -****************************************************************/ -static void -test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? 
*/ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - H5_index_t idx_type; /* Type of index to operate on */ - H5_iter_order_t order; /* Order within in the index */ - attr_iter_info_t iter_info; /* Iterator info */ - bool *visited = NULL; /* Array of flags for visiting links */ - hsize_t idx; /* Start index for iteration */ - unsigned use_index; /* Use index on creation order values */ - const char *dsetname; /* Name of dataset for attributes */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { - return; - } - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Allocate the "visited link" array */ - iter_info.max_visit = max_compact * 2; - visited = (bool *)malloc(sizeof(bool) * iter_info.max_visit); - CHECK_PTR(visited, "malloc"); - iter_info.visited = visited; - - /* Loop over operating on different indices on link fields */ - for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) { - /* Loop over operating in different orders */ - for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) { - /* Loop over using index for creation order value */ - for (use_index = false; use_index <= true; use_index++) { - /* Print appropriate test message */ - if (idx_type == H5_INDEX_CRT_ORDER) { - if (order == H5_ITER_INC) { - if (use_index) - MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " - "Increasing Order w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " - "Increasing Order w/o Creation Order Index\n")) - } /* end if */ - else { - if (use_index) - MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " - "Decreasing Order w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in " - "Decreasing Order w/o Creation Order Index\n")) - } /* end else */ - } /* end if */ - else { - if (order == H5_ITER_INC) { - if (use_index) - MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Increasing Order " - "w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Increasing Order " - "w/o Creation Order Index\n")) - } /* end if */ - else { - if (use_index) - MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Decreasing Order " - "w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Decreasing Order " - "w/o Creation Order Index\n")) - } /* end else */ - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Set attribute creation order tracking & indexing for object */ - if (new_format == true) { - ret = H5Pset_attr_creation_order( - dcpl, 
(H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } /* end if */ - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - dsetname = DSET1_NAME; - break; - - case 1: - my_dataset = dset2; - dsetname = DSET2_NAME; - break; - - case 2: - my_dataset = dset3; - dsetname = DSET3_NAME; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for iterating over object with no attributes (should be OK) */ - ret = H5Aiterate2(my_dataset, idx_type, order, NULL, attr_iterate2_cb, NULL); - CHECK(ret, FAIL, "H5Aiterate2"); - - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, NULL, attr_iterate2_cb, NULL, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, NULL, attr_iterate2_cb, NULL, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aiterate_by_name"); - - /* Create attributes, up to limit of compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) { - /* Check for out of bound iteration */ - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); - - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, - NULL, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, - NULL, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - /* Test iteration over attributes stored compactly */ - ret = attr_iterate_check(fid, 
dsetname, my_dataset, idx_type, order, u, &iter_info); - CHECK(ret, FAIL, "attr_iterate_check"); - } - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - dsetname = DSET1_NAME; - break; - - case 1: - my_dataset = dset2; - dsetname = DSET2_NAME; - break; - - case 2: - my_dataset = dset3; - dsetname = DSET3_NAME; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Create more attributes, to push into dense form */ - for (u = max_compact; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - if (u >= max_compact) { - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - } /* end if */ -#endif - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } -#endif - - if (vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) { - /* Check for out of bound iteration */ - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); - - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, - NULL, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, - NULL, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - /* Test iteration over attributes stored densely */ - ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); - CHECK(ret, FAIL, "attr_iterate_check"); - } - } - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - } /* end for */ - } /* end for */ - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free the "visited link" array */ - free(visited); -} /* test_attr_iterate2() */ - -/*------------------------------------------------------------------------- - * Function: attr_open_by_idx_check - * - * Purpose: Check opening attribute by index on an object - * - * Return: Success: 0 - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -static int -attr_open_by_idx_check(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, unsigned max_attrs) -{ - hid_t attr_id; /* ID of attribute to test */ - H5A_info_t ainfo; /* Attribute info */ - int old_nerrs; /* Number of errors when entering this check */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Retrieve the current # of reported errors */ - old_nerrs = nerrors; - - /* Open each attribute on object by index and check that it's the correct one */ - for (u = 0; u < max_attrs; u++) { - /* Open the attribute */ - attr_id = H5Aopen_by_idx(obj_id, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Aopen_by_idx"); - - /* Get the attribute's information */ - ret = H5Aget_info(attr_id, &ainfo); - CHECK(ret, FAIL, "H5Aget_info"); - - /* Check that the object is the correct one */ - if (order == H5_ITER_INC) { - VERIFY(ainfo.corder, u, "H5Aget_info"); - } /* end if */ - else if (order == H5_ITER_DEC) { - VERIFY(ainfo.corder, (max_attrs - (u + 1)), "H5Aget_info"); - } /* end if */ - else { - /* XXX: What to do about native order? 
*/ - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Retrieve current # of errors */ - if (old_nerrs == nerrors) - return (0); - else - return (-1); -} /* end attr_open_by_idx_check() */ - -/**************************************************************** -** -** test_attr_open_by_idx(): Test basic H5A (attribute) code. -** Tests opening attributes by index -** -****************************************************************/ -static void -test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? */ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - H5_index_t idx_type; /* Type of index to operate on */ - H5_iter_order_t order; /* Order within in the index */ - unsigned use_index; /* Use index on creation order values */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { - return; - } - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Loop over operating on different indices on link fields */ - for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) { - /* Loop over operating in different orders */ - for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) { - /* Loop over using index for creation order value */ - for (use_index = false; use_index <= true; use_index++) { - /* Print appropriate test message */ - if (idx_type == H5_INDEX_CRT_ORDER) { - if (order == H5_ITER_INC) { - if (use_index) - MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Increasing " - "Order w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Increasing " - "Order w/o Creation Order Index\n")) - } /* end if */ - else { - if (use_index) - MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Decreasing " - "Order w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Decreasing " - "Order w/o Creation Order Index\n")) - } /* end else */ - } /* end if */ - else { - if (order == H5_ITER_INC) { - if (use_index) - MESSAGE(5, ("Testing Opening Attributes By Name Index in Increasing Order " - "w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing 
Opening Attributes By Name Index in Increasing Order w/o " - "Creation Order Index\n")) - } /* end if */ - else { - if (use_index) - MESSAGE(5, ("Testing Opening Attributes By Name Index in Decreasing Order " - "w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Opening Attributes By Name Index in Decreasing Order w/o " - "Creation Order Index\n")) - } /* end else */ - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Set attribute creation order tracking & indexing for object */ - if (new_format == true) { - ret = H5Pset_attr_creation_order( - dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } /* end if */ - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for opening an attribute on an object with no attributes */ - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); - - /* Create attributes, up to limit of compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for out of bound opening an attribute on an object */ - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); - - /* Test opening attributes by index stored compactly */ - ret = attr_open_by_idx_check(my_dataset, idx_type, order, u); - CHECK(ret, FAIL, 
"attr_open_by_idx_check"); - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - break; - - case 1: - my_dataset = dset2; - break; - - case 2: - my_dataset = dset3; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Create more attributes, to push into dense form */ - for (u = max_compact; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = - H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - if (u >= max_compact) { - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - } /* end if */ -#endif - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ -#endif - /* Check for out of bound opening an attribute on an object */ - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_idx"); - - /* Test opening attributes by index stored compactly */ - ret = attr_open_by_idx_check(my_dataset, idx_type, order, u); - CHECK(ret, FAIL, "attr_open_by_idx_check"); - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - } /* end for */ - } /* end for */ - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_open_by_idx() */ - -/*------------------------------------------------------------------------- - * Function: attr_open_check - * - * Purpose: Check opening attribute on an object - * - * Return: Success: 0 - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -static int -attr_open_check(hid_t fid, const char *dsetname, hid_t obj_id, unsigned max_attrs) -{ - hid_t attr_id; /* ID of attribute to test */ - H5A_info_t ainfo; /* Attribute info */ - char 
attrname[NAME_BUF_SIZE]; /* Name of attribute */ - int old_nerrs; /* Number of errors when entering this check */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Retrieve the current # of reported errors */ - old_nerrs = nerrors; - - /* Open each attribute on object by index and check that it's the correct one */ - for (u = 0; u < max_attrs; u++) { - /* Open the attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr_id = H5Aopen(obj_id, attrname, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Aopen"); - - /* Get the attribute's information */ - ret = H5Aget_info(attr_id, &ainfo); - CHECK(ret, FAIL, "H5Aget_info"); - - /* Check that the object is the correct one */ - VERIFY(ainfo.corder, u, "H5Aget_info"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open the attribute */ - attr_id = H5Aopen_by_name(obj_id, ".", attrname, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Aopen_by_name"); - - /* Get the attribute's information */ - ret = H5Aget_info(attr_id, &ainfo); - CHECK(ret, FAIL, "H5Aget_info"); - - /* Check that the object is the correct one */ - VERIFY(ainfo.corder, u, "H5Aget_info"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open the attribute */ - attr_id = H5Aopen_by_name(fid, dsetname, attrname, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Aopen_by_name"); - - /* Get the attribute's information */ - ret = H5Aget_info(attr_id, &ainfo); - CHECK(ret, FAIL, "H5Aget_info"); - - /* Check that the object is the correct one */ - VERIFY(ainfo.corder, u, "H5Aget_info"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Retrieve current # of errors */ - if (old_nerrs == nerrors) - return (0); - else - return (-1); -} /* end attr_open_check() */ - -/**************************************************************** -** -** test_attr_open_by_name(): Test basic H5A (attribute) code. -** Tests opening attributes by name -** -****************************************************************/ -static void -test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? 
*/ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - unsigned use_index; /* Use index on creation order values */ - const char *dsetname; /* Name of dataset for attributes */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Loop over using index for creation order value */ - for (use_index = false; use_index <= true; use_index++) { - /* Print appropriate test message */ - if (use_index) - MESSAGE(5, ("Testing Opening Attributes By Name w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Opening Attributes By Name w/o Creation Order Index\n")) - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Set attribute creation order tracking & indexing for object */ - if (new_format == true) { - ret = H5Pset_attr_creation_order( - dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } /* end if */ - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - dsetname = DSET1_NAME; - break; - - case 1: - my_dataset = dset2; - dsetname = DSET2_NAME; - break; - - case 2: - my_dataset = dset3; - dsetname = DSET3_NAME; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for opening a non-existent attribute on an object with no attributes */ - H5E_BEGIN_TRY - { - ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen"); - - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - - /* Create attributes, up to limit of compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, 
sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Check for opening a non-existent attribute on an object with compact attribute storage */ - H5E_BEGIN_TRY - { - ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen"); - - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - - /* Test opening attributes stored compactly */ - ret = attr_open_check(fid, dsetname, my_dataset, u); - CHECK(ret, FAIL, "attr_open_check"); - } /* end for */ - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - dsetname = DSET1_NAME; - break; - - case 1: - my_dataset = dset2; - dsetname = DSET2_NAME; - break; - - case 2: - my_dataset = dset3; - dsetname = DSET3_NAME; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ - - /* Create more attributes, to push into dense form */ - for (u = max_compact; u < (max_compact * 2); u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Verify state of object */ - if (u >= max_compact) { - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - } /* end if */ -#endif - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ -#if 0 - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ -#endif - /* Check for opening a non-existent attribute on an object with dense attribute storage */ - H5E_BEGIN_TRY - { - ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen"); - - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - - H5E_BEGIN_TRY - { - ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Aopen_by_name"); - - /* Test opening attributes stored compactly */ - ret = attr_open_check(fid, dsetname, my_dataset, u); - CHECK(ret, FAIL, "attr_open_check"); - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_open_by_name() */ - -/**************************************************************** -** -** test_attr_create_by_name(): Test basic H5A (attribute) code. -** Tests creating attributes by name -** -****************************************************************/ -static void -test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dset1, dset2, dset3; /* Dataset IDs */ - hid_t my_dataset; /* Current dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - unsigned max_compact; /* Maximum # of links to store in group compactly */ - unsigned min_dense; /* Minimum # of links to store in group "densely" */ -#if 0 - htri_t is_empty; /* Are there any attributes? */ - htri_t is_dense; /* Are attributes stored densely? 
*/ - hsize_t nattrs; /* Number of attributes on object */ - hsize_t name_count; /* # of records in name index */ - hsize_t corder_count; /* # of records in creation order index */ -#endif - unsigned use_index; /* Use index on creation order values */ - const char *dsetname; /* Name of dataset for attributes */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned curr_dset; /* Current dataset to work on */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Create dataspace for dataset & attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset creation property list */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Query the attribute creation properties */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Loop over using index for creation order value */ - for (use_index = false; use_index <= true; use_index++) { - /* Print appropriate test message */ - if (use_index) - MESSAGE(5, ("Testing Creating Attributes By Name w/Creation Order Index\n")) - else - MESSAGE(5, ("Testing Creating Attributes By Name w/o Creation Order Index\n")) - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Set attribute creation order tracking & indexing for object */ - if (new_format == true) { - ret = H5Pset_attr_creation_order( - dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0))); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - } /* end if */ - - /* Create datasets */ - dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dcreate2"); - dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset3, FAIL, "H5Dcreate2"); - - /* Work on all the datasets */ - for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) { - switch (curr_dset) { - case 0: - my_dataset = dset1; - dsetname = DSET1_NAME; - break; - - case 1: - my_dataset = dset2; - dsetname = DSET2_NAME; - break; - - case 2: - my_dataset = dset3; - dsetname = DSET3_NAME; - break; - - default: - assert(0 && "Too many datasets!"); - } /* end switch */ -#if 0 - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Create attributes, up to limit of compact form */ - for (u = 0; u < max_compact; u++) { - /* Create attribute */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, - H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate_by_name"); - - /* Write data into the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Verify information for new attribute */ - ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); - CHECK(ret, FAIL, "attr_info_by_idx_check"); - } /* end for */ 
-#if 0
-            /* Verify state of object */
-            ret = H5O__num_attrs_test(my_dataset, &nattrs);
-            CHECK(ret, FAIL, "H5O__num_attrs_test");
-            VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
-            is_empty = H5O__is_attr_empty_test(my_dataset);
-            VERIFY(is_empty, false, "H5O__is_attr_empty_test");
-            is_dense = H5O__is_attr_dense_test(my_dataset);
-            VERIFY(is_dense, false, "H5O__is_attr_dense_test");
-#endif
-            /* Test opening attributes stored compactly */
-            ret = attr_open_check(fid, dsetname, my_dataset, u);
-            CHECK(ret, FAIL, "attr_open_check");
-        } /* end for */
-
-        /* Work on all the datasets */
-        for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
-            switch (curr_dset) {
-                case 0:
-                    my_dataset = dset1;
-                    dsetname = DSET1_NAME;
-                    break;
-
-                case 1:
-                    my_dataset = dset2;
-                    dsetname = DSET2_NAME;
-                    break;
-
-                case 2:
-                    my_dataset = dset3;
-                    dsetname = DSET3_NAME;
-                    break;
-
-                default:
-                    assert(0 && "Too many datasets!");
-            } /* end switch */
-
-            /* Create more attributes, to push into dense form */
-            for (u = max_compact; u < (max_compact * 2); u++) {
-                /* Create attribute */
-                snprintf(attrname, sizeof(attrname), "attr %02u", u);
-                attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT,
-                                         H5P_DEFAULT, H5P_DEFAULT);
-                CHECK(attr, FAIL, "H5Acreate_by_name");
-
-                /* Write data into the attribute */
-                ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
-                CHECK(ret, FAIL, "H5Awrite");
-
-                /* Close attribute */
-                ret = H5Aclose(attr);
-                CHECK(ret, FAIL, "H5Aclose");
-#if 0
-                /* Verify state of object */
-                if (u >= max_compact) {
-                    is_dense = H5O__is_attr_dense_test(my_dataset);
-                    VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test");
-                } /* end if */
-#endif
-                /* Verify information for new attribute */
-                ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
-                CHECK(ret, FAIL, "attr_info_by_idx_check");
-            } /* end for */
-#if 0
-            /* Verify state of object */
-            ret = H5O__num_attrs_test(my_dataset, &nattrs);
-            CHECK(ret, FAIL, "H5O__num_attrs_test");
-            VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
-            is_empty = H5O__is_attr_empty_test(my_dataset);
-            VERIFY(is_empty, false, "H5O__is_attr_empty_test");
-            is_dense = H5O__is_attr_dense_test(my_dataset);
-            VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test");
-
-            if (new_format) {
-                /* Retrieve & verify # of records in the name & creation order indices */
-                ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
-                CHECK(ret, FAIL, "H5O__attr_dense_info_test");
-                if (use_index)
-                    VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
-                VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
-            } /* end if */
-#endif
-            /* Test opening attributes stored compactly */
-            ret = attr_open_check(fid, dsetname, my_dataset, u);
-            CHECK(ret, FAIL, "attr_open_check");
-        } /* end for */
-
-        /* Close Datasets */
-        ret = H5Dclose(dset1);
-        CHECK(ret, FAIL, "H5Dclose");
-        ret = H5Dclose(dset2);
-        CHECK(ret, FAIL, "H5Dclose");
-        ret = H5Dclose(dset3);
-        CHECK(ret, FAIL, "H5Dclose");
-
-        /* Close file */
-        ret = H5Fclose(fid);
-        CHECK(ret, FAIL, "H5Fclose");
-    } /* end for */
-
-    /* Close property list */
-    ret = H5Pclose(dcpl);
-    CHECK(ret, FAIL, "H5Pclose");
-
-    /* Close dataspace */
-    ret = H5Sclose(sid);
-    CHECK(ret, FAIL, "H5Sclose");
-} /* test_attr_create_by_name() */
-
-/****************************************************************
-**
-** test_attr_shared_write(): Test basic H5A (attribute) code.
-** Tests writing mix of shared & un-shared attributes in "compact" & "dense" storage -** -****************************************************************/ -static void -test_attr_shared_write(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t my_fcpl; /* File creation property list ID */ - hid_t dataset, dataset2; /* Dataset IDs */ - hid_t attr_tid; /* Attribute's datatype ID */ - hid_t sid, big_sid; /* Dataspace IDs */ - hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ - htri_t is_shared; /* Is attributes shared? */ - hsize_t shared_refcount; /* Reference count of shared attribute */ -#endif - unsigned attr_value; /* Attribute value */ - unsigned *big_value; /* Data for "big" attribute */ -#if 0 - size_t mesg_count; /* # of shared messages */ -#endif - unsigned test_shared; /* Index over shared component type */ - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Writing Shared & Unshared Attributes in Compact & Dense Storage\n")); - - /* Allocate & initialize "big" attribute data */ - big_value = (unsigned *)malloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); - CHECK_PTR(big_value, "malloc"); - memset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create "big" dataspace for "large" attributes */ - big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); - CHECK(big_sid, FAIL, "H5Screate_simple"); - - /* Loop over type of shared components */ - for (test_shared = 0; test_shared < 3; test_shared++) { - /* Make copy of file creation property list */ - my_fcpl = H5Pcopy(fcpl); - CHECK(my_fcpl, FAIL, "H5Pcopy"); - - /* Set up datatype for attributes */ - attr_tid = H5Tcopy(H5T_NATIVE_UINT); - CHECK(attr_tid, FAIL, "H5Tcopy"); - - /* Special setup for each type of shared components */ - if (test_shared == 0) { - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end if */ - else { - /* Set up copy of file creation property list */ - - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - - /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close FCPL copy */ - ret = H5Pclose(my_fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Commit datatype to file */ - if (test_shared == 2) { - ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - } /* end if */ - - /* Set up to query the object creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Create datasets */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset2, FAIL, "H5Dcreate2"); - - /* Check on dataset's message storage status */ - if (test_shared != 0) { -#if 0 - /* Datasets' datatypes can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - - /* Datasets' dataspace can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); -#endif - } /* end if */ - - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes to each dataset, until after converting to dense storage */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute name */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - - /* Close attribute's datatype */ - ret = H5Tclose(attr_tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Datasets */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dataset2); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Check on shared message status now */ - if (test_shared != 0) { - if (test_shared == 1) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ - - /* Check on dataspace storage status */ - ret = 
H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Unlink datasets with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Unlink committed datatype */ - if (test_shared == 2) { - ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ -#if 0 - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif - } /* end for */ - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(big_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Release memory */ - free(big_value); -} /* test_attr_shared_write() */ - -/**************************************************************** -** -** test_attr_shared_rename(): Test basic H5A (attribute) code. -** Tests renaming shared attributes in "compact" & "dense" storage -** -****************************************************************/ -static void -test_attr_shared_rename(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* HDF5 File ID */ - hid_t my_fcpl; /* File creation property list ID */ - hid_t dataset, dataset2; /* Dataset ID2 */ - hid_t attr_tid; /* Attribute's datatype ID */ - hid_t sid, big_sid; /* Dataspace IDs */ - hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */ - char attrname2[NAME_BUF_SIZE]; /* Name of attribute on second dataset */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ - htri_t is_shared; /* Is attributes shared? 
*/ - hsize_t shared_refcount; /* Reference count of shared attribute */ -#endif - unsigned attr_value; /* Attribute value */ - unsigned *big_value; /* Data for "big" attribute */ -#if 0 - size_t mesg_count; /* # of shared messages */ -#endif - unsigned test_shared; /* Index over shared component type */ - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Renaming Shared & Unshared Attributes in Compact & Dense Storage\n")); - - /* Allocate & initialize "big" attribute data */ - big_value = (unsigned *)malloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); - CHECK_PTR(big_value, "malloc"); - memset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create "big" dataspace for "large" attributes */ - big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); - CHECK(big_sid, FAIL, "H5Screate_simple"); - - /* Loop over type of shared components */ - for (test_shared = 0; test_shared < 3; test_shared++) { - /* Make copy of file creation property list */ - my_fcpl = H5Pcopy(fcpl); - CHECK(my_fcpl, FAIL, "H5Pcopy"); - - /* Set up datatype for attributes */ - attr_tid = H5Tcopy(H5T_NATIVE_UINT); - CHECK(attr_tid, FAIL, "H5Tcopy"); - - /* Special setup for each type of shared components */ - if (test_shared == 0) { - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end if */ - else { - /* Set up copy of file creation property list */ - - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - - /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close FCPL copy */ - ret = H5Pclose(my_fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Commit datatype to file */ - if (test_shared == 2) { - ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - } /* end if */ - - /* Set up to query the object creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Create datasets */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset2, FAIL, "H5Dcreate2"); -#if 0 - /* Check on dataset's message storage status */ - if (test_shared != 0) { - /* Datasets' datatypes can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - - /* Datasets' dataspace can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes to each dataset, until after converting to dense storage */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute name */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Create new attribute name */ - snprintf(attrname2, sizeof(attrname2), "new attr %02u", u); - - /* Change second dataset's attribute's name */ - ret = H5Arename_by_name(fid, DSET2_NAME, attrname, attrname2, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Arename_by_name"); - - /* Check refcount on attributes now */ - - /* Check refcount on renamed attribute */ - attr = H5Aopen(dataset2, attrname2, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); -#if 0 - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is 
shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Check refcount on original attribute */ - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); -#if 0 - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Change second dataset's attribute's name back to original */ - ret = H5Arename_by_name(fid, DSET2_NAME, attrname2, attrname, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Arename_by_name"); - - /* Check refcount on attributes now */ - - /* Check refcount on renamed attribute */ - attr = H5Aopen(dataset2, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); -#if 0 - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); - } /* end else */ -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Check refcount on original attribute */ - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); -#if 0 - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); - } /* end else */ -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Close attribute's datatype */ - ret = H5Tclose(attr_tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Datasets */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dataset2); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Check on shared message status now */ - if (test_shared != 0) { - if (test_shared == 1) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ - - /* Check on dataspace storage status */ - ret = 
H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Unlink datasets with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "HLdelete"); - ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Unlink committed datatype */ - if (test_shared == 2) { - ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ -#if 0 - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif - } /* end for */ - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(big_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Release memory */ - free(big_value); -} /* test_attr_shared_rename() */ - -/**************************************************************** -** -** test_attr_shared_delete(): Test basic H5A (attribute) code. -** Tests deleting shared attributes in "compact" & "dense" storage -** -****************************************************************/ -static void -test_attr_shared_delete(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t my_fcpl; /* File creation property list ID */ - hid_t dataset, dataset2; /* Dataset IDs */ - hid_t attr_tid; /* Attribute's datatype ID */ - hid_t sid, big_sid; /* Dataspace IDs */ - hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ - htri_t is_shared; /* Is attributes shared? 
*/ - hsize_t shared_refcount; /* Reference count of shared attribute */ -#endif - unsigned attr_value; /* Attribute value */ - unsigned *big_value; /* Data for "big" attribute */ -#if 0 - size_t mesg_count; /* # of shared messages */ -#endif - unsigned test_shared; /* Index over shared component type */ - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deleting Shared & Unshared Attributes in Compact & Dense Storage\n")); - - /* Allocate & initialize "big" attribute data */ - big_value = (unsigned *)malloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); - CHECK_PTR(big_value, "malloc"); - memset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create "big" dataspace for "large" attributes */ - big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); - CHECK(big_sid, FAIL, "H5Screate_simple"); - - /* Loop over type of shared components */ - for (test_shared = 0; test_shared < 3; test_shared++) { - /* Make copy of file creation property list */ - my_fcpl = H5Pcopy(fcpl); - CHECK(my_fcpl, FAIL, "H5Pcopy"); - - /* Set up datatype for attributes */ - attr_tid = H5Tcopy(H5T_NATIVE_UINT); - CHECK(attr_tid, FAIL, "H5Tcopy"); - - /* Special setup for each type of shared components */ - if (test_shared == 0) { - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end if */ - else { - /* Set up copy of file creation property list */ - - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - - /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close FCPL copy */ - ret = H5Pclose(my_fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Commit datatype to file */ - if (test_shared == 2) { - ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - } /* end if */ - - /* Set up to query the object creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Create datasets */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset2, FAIL, "H5Dcreate2"); -#if 0 - /* Check on dataset's message storage status */ - if (test_shared != 0) { - /* Datasets' datatypes can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - - /* Datasets' dataspace can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes to each dataset, until after converting to dense storage */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute name */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - - /* Delete attributes from second dataset */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute name */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - - /* Delete second dataset's attribute */ - ret = H5Adelete_by_name(fid, DSET2_NAME, attrname, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Adelete_by_name"); - - /* Check refcount on attributes now */ - - /* Check refcount on first dataset's attribute */ - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); -#if 0 - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, 
false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Close attribute's datatype */ - ret = H5Tclose(attr_tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Datasets */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dataset2); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Check on shared message status now */ - if (test_shared != 0) { - if (test_shared == 1) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ - - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Unlink datasets with attributes */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Unlink committed datatype */ - if (test_shared == 2) { - ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ -#if 0 - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif - } /* end for */ - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(big_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Release memory */ - free(big_value); -} /* test_attr_shared_delete() */ - -/**************************************************************** -** -** test_attr_shared_unlink(): Test basic H5A (attribute) code. 
-** Tests unlinking object with shared attributes in "compact" & "dense" storage -** -****************************************************************/ -static void -test_attr_shared_unlink(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t my_fcpl; /* File creation property list ID */ - hid_t dataset, dataset2; /* Dataset IDs */ - hid_t attr_tid; /* Attribute's datatype ID */ - hid_t sid, big_sid; /* Dataspace IDs */ - hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */ - hid_t attr; /* Attribute ID */ - hid_t dcpl; /* Dataset creation property list ID */ - char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */ - unsigned max_compact; /* Maximum # of attributes to store compactly */ - unsigned min_dense; /* Minimum # of attributes to store "densely" */ -#if 0 - htri_t is_dense; /* Are attributes stored densely? */ - htri_t is_shared; /* Is attributes shared? */ - hsize_t shared_refcount; /* Reference count of shared attribute */ -#endif - unsigned attr_value; /* Attribute value */ - unsigned *big_value; /* Data for "big" attribute */ -#if 0 - size_t mesg_count; /* # of shared messages */ -#endif - unsigned test_shared; /* Index over shared component type */ - unsigned u; /* Local index variable */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Unlinking Object with Shared Attributes in Compact & Dense Storage\n")); - - /* Allocate & initialize "big" attribute data */ - big_value = (unsigned *)malloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned)); - CHECK_PTR(big_value, "malloc"); - memset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3)); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create "big" dataspace for "large" attributes */ - big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL); - CHECK(big_sid, FAIL, "H5Screate_simple"); - - /* Loop over type of shared components */ - for (test_shared = 0; test_shared < 3; test_shared++) { - /* Make copy of file creation property list */ - my_fcpl = H5Pcopy(fcpl); - CHECK(my_fcpl, FAIL, "H5Pcopy"); - - /* Set up datatype for attributes */ - attr_tid = H5Tcopy(H5T_NATIVE_UINT); - CHECK(attr_tid, FAIL, "H5Tcopy"); - - /* Special setup for each type of shared components */ - if (test_shared == 0) { - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end if */ - else { - /* Set up copy of file creation property list */ - - ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - - /* Make attributes > 500 bytes shared */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - - /* Make datatypes & dataspaces > 1 byte shared (i.e. 
all of them :-) */ - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - } /* end else */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close FCPL copy */ - ret = H5Pclose(my_fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); -#endif - - /* Re-open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Commit datatype to file */ - if (test_shared == 2) { - ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - } /* end if */ - - /* Set up to query the object creation properties */ - if (dcpl_g == H5P_DEFAULT) { - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - } - else { - dcpl = H5Pcopy(dcpl_g); - CHECK(dcpl, FAIL, "H5Pcopy"); - } - - /* Create datasets */ - dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset2, FAIL, "H5Dcreate2"); -#if 0 - /* Check on dataset's message storage status */ - if (test_shared != 0) { - /* Datasets' datatypes can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - - /* Datasets' dataspace can be shared */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Retrieve limits for compact/dense attribute storage */ - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Close property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); -#endif - /* Add attributes to each dataset, until after converting to dense storage */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute name */ - snprintf(attrname, sizeof(attrname), "attr %02u", u); - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on first dataset */ - attr = H5Acreate2(dataset, attrname, attr_tid, 
big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* ChecFk that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - - /* Alternate between creating "small" & "big" attributes */ - if (u % 2) { - /* Create "small" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); -#endif - /* Write data into the attribute */ - attr_value = u + 1; - ret = H5Awrite(attr, attr_tid, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - } /* end if */ - else { - /* Create "big" attribute on second dataset */ - attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); -#if 0 - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); -#endif - /* Write data into the attribute */ - big_value[0] = u + 1; - ret = H5Awrite(attr, attr_tid, big_value); - CHECK(ret, FAIL, "H5Awrite"); -#if 0 - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); -#endif - } /* end else */ - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); -#if 0 - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - } /* end for */ - - /* Close attribute's datatype */ - ret = H5Tclose(attr_tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close second dataset */ - ret = H5Dclose(dataset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink second dataset */ - ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - -#if 0 - /* Check on first dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); -#endif - /* Check ref count on attributes of first dataset */ - for (u = 0; u < max_compact * 2; u++) { - /* Create attribute name */ - snprintf(attrname, 
sizeof(attrname), "attr %02u", u); - - /* Open attribute on first dataset */ - attr = H5Aopen(dataset, attrname, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); -#if 0 - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ -#endif - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Close Datasets */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Unlink first dataset */ - ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Unlink committed datatype */ - if (test_shared == 2) { - ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ -#if 0 - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ -#endif - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - if (h5_using_default_driver(NULL)) { - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); - } -#endif - } /* end for */ - - /* Close dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(big_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Release memory */ - free(big_value); -} /* test_attr_shared_unlink() */ - -/**************************************************************** -** -** test_attr_bug1(): Test basic H5A (attribute) code. -** Tests odd sequence of allocating and deallocating space in the file. -** The series of actions below constructs a file with an attribute -** in each object header chunk, except the first. Then, the attributes -** are removed and re-created in a way that makes the object header -** allocation code remove an object header chunk "in the middle" of -** the sequence of the chunks. 
-** -****************************************************************/ -static void -test_attr_bug1(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - hid_t aid; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Allocating and De-allocating Attributes in Unusual Way\n")); - - /* Create dataspace ID for attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create main group to operate on */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file and create another group, then attribute on first group */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create second group */ - gid = H5Gcreate2(fid, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Re-open first group */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create attribute on first group */ - aid = H5Acreate2(gid, ATTR7_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file and create another group, then another attribute on first group */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create third group */ - gid = H5Gcreate2(fid, GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Unlink second group */ - ret = H5Ldelete(fid, GROUP2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Re-open first group */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create another attribute on first group */ - aid = H5Acreate2(gid, ATTR8_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file and re-create attributes on first group */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open first group */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Delete first attribute */ - ret = H5Adelete(gid, ATTR7_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Re-create first attribute */ - aid = H5Acreate2(gid, ATTR7_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Delete second attribute */ - ret = H5Adelete(gid, ATTR8_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Re-create second attribute */ - aid = H5Acreate2(gid, ATTR8_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, 
"H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close dataspace ID */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Gclose"); -} /* test_attr_bug1() */ - -/**************************************************************** -** -** test_attr_bug2(): Test basic H5A (attribute) code. -** Tests deleting a large number of attributes with the -** intention of creating a null message with a size that -** is too large. This routine deletes every other -** attribute, but the original bug could also be -** reproduced by deleting every attribute except a few to -** keep the chunk open. -** -****************************************************************/ -static void -test_attr_bug2(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - hid_t aid; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hid_t gcpl; /* Group creation property list */ - hsize_t dims[2] = {10, 100}; /* Attribute dimensions */ - char aname[16]; /* Attribute name */ - unsigned i; /* index */ - herr_t ret; /* Generic return status */ - htri_t tri_ret; /* htri_t return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Allocating and De-allocating Attributes in Unusual Way\n")); - - /* Create group creation property list */ - gcpl = H5Pcreate(H5P_GROUP_CREATE); - CHECK(gcpl, FAIL, "H5Pcreate"); - - /* Prevent the library from switching to dense attribute storage */ - /* Not doing this with the latest format actually triggers a different bug. - * This will be tested here as soon as it is fixed. -NAF - */ - ret = H5Pset_attr_phase_change(gcpl, BUG2_NATTR + 10, BUG2_NATTR + 5); - CHECK(ret, FAIL, "H5Pset_attr_phase_change"); - - /* Create dataspace ID for attributes */ - sid = H5Screate_simple(2, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create main group to operate on */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create attributes on group */ - for (i = 0; i < BUG2_NATTR; i++) { - snprintf(aname, sizeof(aname), "%03u", i); - aid = H5Acreate2(gid, aname, H5T_STD_I32LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - } - - /* Delete every other attribute */ - for (i = 1; i < BUG2_NATTR; i += 2) { - snprintf(aname, sizeof(aname), "%03u", i); - ret = H5Adelete(gid, aname); - CHECK(ret, FAIL, "H5Adelete"); - } - - /* Close IDs */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Reopen file and group */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen"); - - /* Open an attribute in the middle */ - i = (BUG2_NATTR / 4) * 2; - snprintf(aname, sizeof(aname), "%03u", i); - aid = H5Aopen(gid, aname, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Aopen"); - - /* Verify that the attribute has the correct datatype */ - tid = H5Aget_type(aid); - CHECK(tid, FAIL, "H5Aget_type"); - - tri_ret = H5Tequal(tid, H5T_STD_I32LE); - VERIFY(tri_ret, true, "H5Tequal"); - - /* Close IDs */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = 
H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Now test a variation on this bug - where either the size of chunk 0 goes - * down a "notch" or two, or chunk 1 becomes completely null at the same - * time that a null message that is too large is formed */ - dims[0] = 25; - dims[1] = 41; /* 1025*4 byte attribute size */ - - /* Create dataspace ID for attributes */ - sid = H5Screate_simple(2, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create main group to operate on */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create attributes on group */ - for (i = 0; i < BUG2_NATTR2; i++) { - snprintf(aname, sizeof(aname), "%03u", i); - aid = H5Acreate2(gid, aname, H5T_STD_I32LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - } - - /* Delete every other attribute */ - for (i = 0; i < BUG2_NATTR2; i++) { - snprintf(aname, sizeof(aname), "%03u", i); - ret = H5Adelete(gid, aname); - CHECK(ret, FAIL, "H5Adelete"); - } - - /* Close IDs */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Pclose(gcpl); - CHECK(ret, FAIL, "H5Pclose"); -} /* test_attr_bug2() */ - -/**************************************************************** -** -** test_attr_bug3(): Test basic H5A (attribute) code. -** Tests creating and deleting attributes which use a -** datatype and/or dataspace stored in the same object -** header. -** -****************************************************************/ -static void -test_attr_bug3(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t aid1, aid2; /* Attribute IDs */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t tid1, tid2; /* Datatype IDs */ - hid_t did; /* Dataset ID */ - hsize_t dims1[2] = {2, 2}, dims2[2] = {3, 3}; /* Dimensions */ - int wdata1[2][2]; - unsigned wdata2[3][3]; /* Write buffers */ - unsigned u, v; /* Local index variables */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Attributes in the Same Header as their Datatypes\n")); - - /* Create dataspaces */ - sid1 = H5Screate_simple(2, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - sid2 = H5Screate_simple(2, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create file to operate on */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create datatypes and commit tid1 */ - tid1 = H5Tcopy(H5T_STD_I16BE); - CHECK(tid1, FAIL, "H5Tcopy"); - tid2 = H5Tcopy(H5T_STD_U64LE); - CHECK(tid1, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "dtype", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create dataset */ - did = H5Dcreate2(fid, "dset", tid2, sid2, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Create attribute on datatype, using that datatype as its datatype */ - aid1 = H5Acreate2(tid1, "attr", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid1, FAIL, "H5Acreate2"); - - /* Create attribute on dataset, using its datatype and dataspace */ - aid2 = H5Acreate2(did, "attr", tid2, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid2, FAIL, "H5Acreate2"); - - /* Close attributes */ - ret = H5Aclose(aid1); - CHECK(ret, FAIL, "H5Aclose"); - ret = 
H5Aclose(aid2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Reopen attributes */ - aid1 = H5Aopen(tid1, "attr", H5P_DEFAULT); - CHECK(aid1, FAIL, "H5Aopen"); - aid2 = H5Aopen(did, "attr", H5P_DEFAULT); - CHECK(aid2, FAIL, "H5Aopen"); - - /* Initialize the write buffers */ - for (u = 0; u < dims1[0]; u++) - for (v = 0; v < dims1[1]; v++) - wdata1[u][v] = (int)((u * dims1[1]) + v); - for (u = 0; u < dims2[0]; u++) - for (v = 0; v < dims2[1]; v++) - wdata2[u][v] = (unsigned)((u * dims2[1]) + v); - - /* Write data to the attributes */ - ret = H5Awrite(aid1, H5T_NATIVE_INT, wdata1); - CHECK(ret, FAIL, "H5Awrite"); - ret = H5Awrite(aid2, H5T_NATIVE_UINT, wdata2); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attributes */ - ret = H5Aclose(aid1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aid2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Delete attributes */ - ret = H5Adelete(tid1, "attr"); - CHECK(ret, FAIL, "H5Adelete"); - ret = H5Adelete(did, "attr"); - CHECK(ret, FAIL, "H5Adelete"); - - /* Recreate attributes */ - aid1 = H5Acreate2(tid1, "attr", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid1, FAIL, "H5Acreate2"); - aid2 = H5Acreate2(did, "attr", tid2, sid2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid2, FAIL, "H5Acreate2"); - - /* Delete attributes (note they are still open) */ - ret = H5Adelete(tid1, "attr"); - CHECK(ret, FAIL, "H5Adelete"); - ret = H5Adelete(did, "attr"); - CHECK(ret, FAIL, "H5Adelete"); - - /* Close dataspaces and transient datatype */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close dataset and committed datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Delete dataset and committed datatype */ - ret = H5Ldelete(fid, "dtype", H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Ldelete(fid, "dset", H5P_DEFAULT); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close attributes */ - ret = H5Aclose(aid1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aid2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_bug3() */ - -/**************************************************************** -** -** test_attr_bug4(): Test basic H5A (attribute) code. -** Attempts to trigger a bug which would result in being -** unable to add an attribute to a named datatype. This -** happened when an object header chunk was too small to -** hold a continuation message and could not be extended. 
-** -****************************************************************/ -static void -test_attr_bug4(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - hid_t aid1, aid2, aid3; /* Attribute IDs */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hid_t did; /* Dataset ID */ - hsize_t dims[1] = {5}; /* Attribute dimensions */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing that attributes can always be added to named datatypes\n")); - - /* Create dataspace */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Open root group */ - gid = H5Gopen2(fid, "/", H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create committed datatype */ - tid = H5Tcopy(H5T_STD_I32LE); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "dtype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create dataset */ - did = H5Dcreate2(fid, "dset", tid, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Create attributes on group and dataset */ - aid1 = H5Acreate2(gid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid1, FAIL, "H5Acreate2"); - aid2 = H5Acreate2(did, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid2, FAIL, "H5Acreate2"); - - /* Create attribute on datatype (this is the main test) */ - aid3 = H5Acreate2(tid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid3, FAIL, "H5Acreate2"); - - /* Close IDs */ - ret = H5Aclose(aid3); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(aid2); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Aclose(aid1); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_bug4() */ - -/**************************************************************** -** -** test_attr_bug5(): Test basic H5A (attribute) code. -** Tests opening an attribute multiple times through -** objects opened through different file handles. 
-** -****************************************************************/ -static void -test_attr_bug5(hid_t fcpl, hid_t fapl) -{ - hid_t fid1, fid2; /* File IDs */ - hid_t gid1, gid2; /* Group IDs */ - hid_t did1, did2; /* Dataset IDs */ - hid_t tid1, tid2; /* Datatype IDs */ - hid_t aidg1, aidg2, aidd1, aidd2, aidt1, aidt2; /* Attribute IDs */ - hid_t sid; /* Dataspace ID */ - hsize_t dims[1] = {5}; /* Attribute dimensions */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Opening an Attribute Through Multiple Files Concurrently\n")); - - /* Create dataspace ID for attributes and datasets */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Open root group */ - gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gopen2"); - - /* Create and commit datatype */ - tid1 = H5Tcopy(H5T_STD_I32LE); - CHECK(tid1, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid1, BUG3_DT_NAME, tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create dataset */ - did1 = H5Dcreate2(fid1, BUG3_DSET_NAME, tid1, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dcreate2"); - - /* Create attribute on root group */ - aidg1 = H5Acreate2(gid1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aidg1, FAIL, "H5Acreate2"); - - /* Create attribute on dataset */ - aidd1 = H5Acreate2(did1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aidd1, FAIL, "H5Acreate2"); - - /* Create attribute on datatype */ - aidt1 = H5Acreate2(tid1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aidt1, FAIL, "H5Acreate2"); - - /* Close all IDs */ - ret = H5Aclose(aidt1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidd1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidg1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Open file twice */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - fid2 = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Open the root group twice */ - gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gopen2"); - gid2 = H5Gopen2(fid2, "/", H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - /* Open the root group attribute twice */ - aidg1 = H5Aopen(gid1, BUG3_ATTR_NAME, H5P_DEFAULT); - CHECK(aidg1, FAIL, "H5Aopen"); - aidg2 = H5Aopen(gid2, BUG3_ATTR_NAME, H5P_DEFAULT); - CHECK(aidg2, FAIL, "H5Aopen"); - - /* Open the dataset twice */ - did1 = H5Dopen2(fid1, BUG3_DSET_NAME, H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dopen2"); - did2 = H5Dopen2(fid2, BUG3_DSET_NAME, H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dopen2"); - - /* Open the dataset attribute twice */ - aidd1 = H5Aopen(did1, BUG3_ATTR_NAME, H5P_DEFAULT); - CHECK(aidd1, FAIL, "H5Aopen"); - aidd2 = H5Aopen(did2, BUG3_ATTR_NAME, H5P_DEFAULT); - CHECK(aidd2, FAIL, "H5Aopen"); - - /* Open the datatype twice */ - tid1 = H5Topen2(fid1, BUG3_DT_NAME, H5P_DEFAULT); - CHECK(tid1, FAIL, "H5Topen2"); - tid2 = H5Topen2(fid2, BUG3_DT_NAME, H5P_DEFAULT); - CHECK(tid2, FAIL, "H5Topen2"); - - /* Open the datatype attribute twice */ - aidt1 =
H5Aopen(tid1, BUG3_ATTR_NAME, H5P_DEFAULT); - CHECK(aidt1, FAIL, "H5Aopen"); - aidt2 = H5Aopen(tid2, BUG3_ATTR_NAME, H5P_DEFAULT); - CHECK(aidt2, FAIL, "H5Aopen"); - - /* Close all attributes */ - ret = H5Aclose(aidg1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidg2); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidd1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidd2); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidt1); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Aclose(aidt2); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close root groups */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close datasets */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatypes */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close files */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_bug5() */ - -/**************************************************************** -** -** test_attr_bug6(): Test basic H5A (attribute) code. -** Tests if reading an empty attribute is OK. -** -****************************************************************/ -static void -test_attr_bug6(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - hid_t aid1, aid2; /* Attribute IDs */ - hid_t sid; /* Dataspace ID */ - hsize_t dims[ATTR1_RANK] = {ATTR1_DIM1}; /* Attribute dimensions */ - int intar[ATTR1_DIM1]; /* Data reading buffer */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing that empty attribute can be read\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Open root group */ - gid = H5Gopen2(fid, "/", H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create dataspace */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create attribute on group */ - aid1 = H5Acreate2(gid, ATTR1_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid1, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid1); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open the attribute again */ - aid2 = H5Aopen(gid, ATTR1_NAME, H5P_DEFAULT); - CHECK(aid2, FAIL, "H5Aopen"); - - ret = H5Aread(aid2, H5T_NATIVE_INT, intar); - CHECK(ret, FAIL, "H5Aread"); - - /* Close IDs */ - ret = H5Aclose(aid2); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_bug6() */ - -/**************************************************************** -** -** test_attr_bug7(): Test basic H5A (attribute) code. -** (Really tests object header allocation code). -** Tests creating and deleting attributes in such a way as -** to change the size of the "chunk #0 size" field. -** Includes testing "skipping" a possible size of the -** field, i.e. going from 1 to 4 bytes or 4 to 1 byte. 
-** -****************************************************************/ -#if 0 -static void -test_attr_bug7(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t aid; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims_s = 140; /* Small attribute dimensions */ - hsize_t dims_l = 65480; /* Large attribute dimensions */ - H5A_info_t ainfo; /* Attribute info */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing adding and deleting large attributes\n")); - - /* Create committed datatype to operate on. Use a committed datatype so that - * there is nothing after the object header and the first chunk can expand and - * contract as necessary. */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - tid = H5Tcopy(H5T_STD_I32LE); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, TYPE1_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* - * Create small attribute - */ - sid = H5Screate_simple(1, &dims_s, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - aid = H5Acreate2(tid, ATTR1_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close file */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check attribute */ - tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - - /* - * Create another small attribute. Should cause chunk size field to expand by - * 1 byte (1->2). - */ - aid = H5Acreate2(tid, ATTR2_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close file */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check attributes */ - tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - - /* - * Create large attribute. Should cause chunk size field to expand by 2 bytes - * (2->4). 
- */ - ret = H5Sset_extent_simple(sid, 1, &dims_l, NULL); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - aid = H5Acreate2(tid, ATTR3_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close file */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check attributes */ - tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - ret = H5Aget_info_by_name(tid, ".", ATTR3_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_l) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_l); - - /* - * Delete last two attributes - should merge into a null message that is too - * large, causing the chunk size field to shrink by 3 bytes (4->1). - */ - ret = H5Sset_extent_simple(sid, 1, &dims_l, NULL); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - ret = H5Adelete(tid, ATTR2_NAME); - CHECK(ret, FAIL, "H5Adelete"); - ret = H5Adelete(tid, ATTR3_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check attribute */ - tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - - /* - * Create large attribute. Should cause chunk size field to expand by 3 bytes - * (1->4). 
- */ - aid = H5Acreate2(tid, ATTR2_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close file */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check attributes */ - tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_s) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_s); - ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims_l) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims_l); - - /* Close IDs */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_bug7() */ -#endif - -/**************************************************************** -** -** test_attr_bug8(): Test basic H5A (attribute) code. -** (Really tests object header code). -** Tests adding a link and attribute to a group in such a -** way as to cause the "chunk #0 size" field to expand -** when some object header messages are not loaded into -** cache. Before the bug was fixed, this would prevent -** these messages from being shifted to the correct -** position as the expansion algorithm marked them dirty, -** invalidating the raw form, when there was no native -** form to encode. -** -****************************************************************/ -static void -test_attr_bug8(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t aid; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hid_t gid; /* Group ID */ - hid_t oid; /* Object ID */ - hsize_t dims = 256; /* Attribute dimensions */ - H5O_info2_t oinfo; /* Object info */ - H5A_info_t ainfo; /* Attribute info */ - H5O_token_t root_token; /* Root group token */ - int cmp_value; /* Result from H5Otoken_cmp */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing attribute expanding object header with undecoded messages\n")); - - /* Create committed datatype to operate on. Use a committed datatype so that - * there is nothing after the object header and the first chunk can expand and - * contract as necessary. 
*/ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Get root group token */ - ret = H5Oget_info3(fid, &oinfo, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info"); - root_token = oinfo.token; - - /* - * Create link to root group - */ - ret = H5Lcreate_hard(fid, "/", gid, LINK1_NAME, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check link */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - oid = H5Oopen(gid, LINK1_NAME, H5P_DEFAULT); - CHECK(oid, FAIL, "H5Oopen"); - ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info"); - ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value); - CHECK(ret, FAIL, "H5Otoken_cmp"); - VERIFY(cmp_value, 0, "H5Otoken_cmp"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Oclose(oid); - CHECK(ret, FAIL, "H5Oclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* - * Create attribute. Should cause chunk size field to expand by 1 byte - * (1->2). - */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - sid = H5Screate_simple(1, &dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - aid = H5Acreate2(gid, ATTR1_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close file */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Check link and attribute */ - gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - oid = H5Oopen(gid, LINK1_NAME, H5P_DEFAULT); - CHECK(oid, FAIL, "H5Oopen"); - ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info"); - ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value); - CHECK(ret, FAIL, "H5Otoken_cmp"); - VERIFY(cmp_value, 0, "H5Otoken_cmp"); - ret = H5Aget_info_by_name(gid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Aget_info_by_name"); - if (ainfo.data_size != dims) - TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n", - (long long unsigned)ainfo.data_size, (long long unsigned)dims); - - /* Close IDs */ - ret = H5Oclose(oid); - CHECK(ret, FAIL, "H5Oclose"); - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_attr_bug8() */ - -/**************************************************************** -** -** test_attr_bug9(): Test basic H5A (attribute) code. -** (Really tests object header code). -** Tests adding several large attributes to an object until -** they convert to dense storage. The total size of all -** attributes is larger than 64K, causing the internal -** object header code to, after merging the deleted -** messages in to a NULL message, shrink the object header -** chunk. 
Do this twice: once with only attributes in the -** object header chunk and once with a (small) soft link in -** the chunk as well. In both cases, the shrunk chunk will -** initially be too small and a new NULL message must be -** created. -** -****************************************************************/ -static void -test_attr_bug9(hid_t fcpl, hid_t fapl) -{ - hid_t fid = -1; /* File ID */ - hid_t gid = -1; /* Group ID */ - hid_t aid = -1; /* Attribute ID */ - hid_t sid = -1; /* Dataspace ID */ - hsize_t dims[1] = {32768}; /* Attribute dimensions */ - int create_link; /* Whether to create a soft link */ - unsigned max_compact; /* Setting from fcpl */ - unsigned min_dense; /* Setting from fcpl */ - char aname[11]; /* Attribute name */ - unsigned i; /* Local index variable */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing adding several large attributes until they convert to dense storage\n")); - - /* Create dataspace */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Obtain attribute phase change settings */ - ret = H5Pget_attr_phase_change(fcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Run with and without the soft link */ - for (create_link = 0; create_link < 2; create_link++) { - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create second group */ - gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Close second group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open root group */ - gid = H5Gopen2(fid, "/", H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create enough attributes to cause a change to dense storage */ - for (i = 0; i < max_compact + 1; i++) { - /* Create attribute */ - snprintf(aname, sizeof(aname), "%u", i); - aid = H5Acreate2(gid, aname, H5T_NATIVE_CHAR, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create enough soft links that exactly one goes into chunk 1 if - * requested */ - if (i == 0 && create_link) { - ret = H5Lcreate_soft("b", gid, "a", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - ret = H5Lcreate_soft("d", gid, "c", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - ret = H5Lcreate_soft("f", gid, "e", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - } /* end if */ - } /* end for */ - - /* Close IDs */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_bug9() */ - -/**************************************************************** -** -** test_attr_bug10(): Test basic H5A (attribute) code. -** Attempts to trigger a bug which would result in a -** segfault. Create a vlen attribute through a file -** handle, then open the same file through a different -** handle, open the same attribute through the second file -** handle, then close the second file and attribute -** handles, then write to the attribute through the first -** handle.
-** -****************************************************************/ -static void -test_attr_bug10(hid_t fcpl, hid_t fapl) -{ - hid_t fid1, fid2; /* File IDs */ - hid_t aid1, aid2; /* Attribute IDs */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[1] = {1}; /* Attribute dimensions */ - const char *wbuf[1] = {"foo"}; /* Write buffer */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing that vlen attributes can be written to after a second file handle is closed\n")); - - /* Create dataspace */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create VL string datatype */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create attribute on root group */ - aid1 = H5Acreate2(fid1, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid1, FAIL, "H5Acreate2"); - - /* Open the same file again */ - fid2 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Open the same attribute through the second file handle */ - aid2 = H5Aopen(fid2, "attr", H5P_DEFAULT); - CHECK(aid2, FAIL, "H5Aopen"); - - /* Close the second attribute and file handles */ - ret = H5Aclose(aid2); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Write to the attribute through the first handle */ - ret = H5Awrite(aid1, tid, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close IDs */ - ret = H5Aclose(aid1); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_attr_bug10() */ - -/**************************************************************** -** -** test_attr_delete_last_dense(): -** This is to verify the error as described in HDFFV-9277 -** is fixed when deleting the last "large" attribute that -** is stored densely.
-** -****************************************************************/ -#if 0 /* Native VOL connector only supports large attributes with latest format */ -static void -test_attr_delete_last_dense(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - hid_t aid; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hsize_t dim2[2] = {DIM0, DIM1}; /* Dimension sizes */ - int i, j; /* Local index variables */ - double *data = NULL; /* Pointer to the data buffer */ - herr_t ret; /* Generic return status */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deleting the last large attribute stored densely\n")); - - /* Create the file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create the group */ - gid = H5Gcreate2(fid, GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate"); - - /* Create the dataspace */ - sid = H5Screate_simple(RANK, dim2, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Attach the attribute to the group */ - aid = H5Acreate2(gid, ATTRNAME, H5T_IEEE_F64LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Allocate the data buffer */ - data = (double *)malloc((size_t)(DIM0 * DIM1) * sizeof(double)); - CHECK_PTR(data, "malloc"); - - /* Initialize the data */ - for (i = 0; i < DIM0; i++) - for (j = 0; j < DIM1; j++) - *(data + i * DIM1 + j) = i + j; - - /* Write to the attribute */ - ret = H5Awrite(aid, H5T_NATIVE_DOUBLE, data); - CHECK(ret, FAIL, "H5Awrite"); - - /* Closing */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the group */ - gid = H5Gopen2(fid, GRPNAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen"); - - /* Delete the attribute */ - ret = H5Adelete(gid, ATTRNAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Closing */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free the data buffer */ - if (data) - free(data); - -} /* test_attr_delete_last_dense() */ -#endif - -/**************************************************************** -** -** test_attr(): Main H5A (attribute) testing routine. -** -****************************************************************/ -void -test_attr(void) -{ - hid_t fapl = (-1), fapl2 = (-1); /* File access property lists */ - hid_t fcpl = (-1), fcpl2 = (-1); /* File creation property lists */ - hid_t dcpl = -1; /* Dataset creation property list */ - unsigned new_format; /* Whether to use the new format or not */ - unsigned use_shared; /* Whether to use shared attributes or not */ - unsigned minimize_dset_oh; /* Whether to use minimized dataset object headers */ - herr_t ret; /* Generic return value */ - - MESSAGE(5, ("Testing Attributes\n")); - - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* fapl2 uses "latest version of the format" for creating objects in the file */ - fapl2 = H5Pcopy(fapl); - CHECK(fapl2, FAIL, "H5Pcopy"); - ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* files with fcpl2 make all attributes ( > 1 byte) shared - * (i.e. 
all of them :-) */ - fcpl2 = H5Pcopy(fcpl); - CHECK(fcpl2, FAIL, "H5Pcopy"); - ret = H5Pset_shared_mesg_nindexes(fcpl2, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(fcpl2, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)1); - CHECK_I(ret, "H5Pset_shared_mesg_index"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); - - ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED); - CHECK(ret, FAIL, ""); - - dcpl_g = dcpl; - - for (minimize_dset_oh = 0; minimize_dset_oh <= 1; minimize_dset_oh++) { - if (minimize_dset_oh != 0) - continue; - -#if 0 - if (minimize_dset_oh == 0) { - MESSAGE(7, ("testing with default dataset object headers\n")); - dcpl_g = H5P_DEFAULT; - } - else { - MESSAGE(7, ("testing with minimzied dataset object headers\n")); - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - ret = H5Pset_dset_no_attrs_hint(dcpl, true); - CHECK_I(ret, "H5Pset_dset_no_attrs_hint"); - dcpl_g = dcpl; - } -#endif - - for (new_format = false; new_format <= true; new_format++) { - hid_t my_fapl = fapl; - - if (new_format) - continue; - -#if 0 - /* Set the FAPL for the type of format */ - if (new_format) { - MESSAGE(7, ("testing with new file format\n")); - my_fapl = fapl2; - } - else { - MESSAGE(7, ("testing with old file format\n")); - my_fapl = fapl; - } -#endif - - /* These next two tests use the same file information */ - test_attr_basic_write(my_fapl); /* Test basic H5A writing code */ - test_attr_basic_read(my_fapl); /* Test basic H5A reading code */ - - /* These next two tests use their own file information */ - test_attr_flush(my_fapl); /* Test H5A I/O in the presence of H5Fflush calls */ - test_attr_plist(my_fapl); /* Test attribute property lists */ - - /* These next two tests use the same file information */ - test_attr_compound_write(my_fapl); /* Test complex datatype H5A writing code */ - test_attr_compound_read(my_fapl); /* Test complex datatype H5A reading code */ - - /* These next two tests use the same file information */ - test_attr_scalar_write(my_fapl); /* Test scalar dataspace H5A writing code */ - test_attr_scalar_read(my_fapl); /* Test scalar dataspace H5A reading code */ - - /* These next four tests use the same file information */ - test_attr_mult_write(my_fapl); /* Test H5A writing code for multiple attributes */ - test_attr_mult_read(my_fapl); /* Test H5A reading code for multiple attributes */ - test_attr_iterate(my_fapl); /* Test H5A iterator code */ - test_attr_delete(my_fapl); /* Test H5A code for deleting attributes */ - - /* This next test uses its own file information */ - test_attr_dtype_shared(my_fapl); /* Test using shared datatypes in attributes */ - - /* This next test uses its own file information */ - test_attr_duplicate_ids(my_fapl); - - for (use_shared = false; use_shared <= true; use_shared++) { - hid_t my_fcpl; - - if (new_format == true && use_shared) { - MESSAGE(7, ("testing with shared attributes\n")); - my_fcpl = fcpl2; - } - else { - MESSAGE(7, ("testing without shared attributes\n")); - my_fcpl = fcpl; - } - - test_attr_big(my_fcpl, my_fapl); /* Test storing big attribute */ - test_attr_null_space(my_fcpl, my_fapl); /* Test storing attribute with NULL dataspace */ - test_attr_deprec(fcpl, my_fapl); /* Test deprecated API routines */ - test_attr_many(new_format, my_fcpl, my_fapl); /* Test storing lots of attributes */ - test_attr_info_null_info_pointer(my_fcpl, - my_fapl); /* Test passing a NULL attribute info pointer to - 
H5Aget_info(_by_name/_by_idx) */ - test_attr_rename_invalid_name( - my_fcpl, - my_fapl); /* Test passing a NULL or empty attribute name to H5Arename(_by_name) */ - test_attr_get_name_invalid_buf( - my_fcpl, my_fapl); /* Test passing NULL buffer to H5Aget_name(_by_idx) */ - - /* New attribute API routine tests */ - test_attr_info_by_idx(new_format, my_fcpl, - my_fapl); /* Test querying attribute info by index */ - test_attr_delete_by_idx(new_format, my_fcpl, my_fapl); /* Test deleting attribute by index */ - test_attr_iterate2(new_format, my_fcpl, - my_fapl); /* Test iterating over attributes by index */ - test_attr_open_by_idx(new_format, my_fcpl, my_fapl); /* Test opening attributes by index */ - test_attr_open_by_name(new_format, my_fcpl, my_fapl); /* Test opening attributes by name */ - test_attr_create_by_name(new_format, my_fcpl, my_fapl); /* Test creating attributes by name */ - - /* Tests that address specific bugs */ - test_attr_bug1(my_fcpl, my_fapl); /* Test odd allocation operations */ - test_attr_bug2(my_fcpl, my_fapl); /* Test many deleted attributes */ - test_attr_bug3(my_fcpl, my_fapl); /* Test "self referential" attributes */ - test_attr_bug4(my_fcpl, my_fapl); /* Test attributes on named datatypes */ - test_attr_bug5(my_fcpl, - my_fapl); /* Test opening/closing attributes through different file handles */ - test_attr_bug6(my_fcpl, my_fapl); /* Test reading empty attribute */ - /* test_attr_bug7 is specific to the "new" object header format, - * and in fact fails if used with the old format due to the - * attributes being larger than 64K */ - test_attr_bug8(my_fcpl, - my_fapl); /* Test attribute expanding object header with undecoded messages */ - test_attr_bug9(my_fcpl, my_fapl); /* Test large attributes converting to dense storage */ - test_attr_bug10(my_fcpl, my_fapl); /* Test writing an attribute after opening and closing - through a different file handle */ - - /* tests specific to the "new format" */ - if (new_format == true) { - /* General attribute tests */ - test_attr_dense_create(my_fcpl, my_fapl); /* Test dense attribute storage creation */ - test_attr_dense_open(my_fcpl, my_fapl); /* Test opening attributes in dense storage */ - test_attr_dense_delete(my_fcpl, my_fapl); /* Test deleting attributes in dense storage */ - test_attr_dense_rename(my_fcpl, my_fapl); /* Test renaming attributes in dense storage */ - test_attr_dense_unlink( - my_fcpl, my_fapl); /* Test unlinking object with attributes in dense storage */ - test_attr_dense_limits(my_fcpl, my_fapl); /* Test dense attribute storage limits */ - test_attr_dense_dup_ids(my_fcpl, - my_fapl); /* Test duplicated IDs for dense attribute storage */ - - /* Attribute creation order tests */ - test_attr_corder_create_basic( - my_fcpl, my_fapl); /* Test creating an object w/attribute creation order info */ - test_attr_corder_create_compact(my_fcpl, - my_fapl); /* Test compact attribute storage on an object - w/attribute creation order info */ - test_attr_corder_create_dense(my_fcpl, - my_fapl); /* Test dense attribute storage on an object - w/attribute creation order info */ - test_attr_corder_create_reopen(my_fcpl, - my_fapl); /* Test creating attributes w/reopening file from - using new format to using old format */ - test_attr_corder_transition(my_fcpl, - my_fapl); /* Test attribute storage transitions on an object - w/attribute creation order info */ - test_attr_corder_delete(my_fcpl, my_fapl); /* Test deleting object using dense storage - w/attribute creation order info */ - - /* More complex tests with 
exclusively both "new format" and "shared" attributes */ - if (use_shared == true) { - test_attr_shared_write( - my_fcpl, - my_fapl); /* Test writing to shared attributes in compact & dense storage */ - test_attr_shared_rename( - my_fcpl, - my_fapl); /* Test renaming shared attributes in compact & dense storage */ - test_attr_shared_delete( - my_fcpl, - my_fapl); /* Test deleting shared attributes in compact & dense storage */ - test_attr_shared_unlink(my_fcpl, my_fapl); /* Test unlinking object with shared - attributes in compact & dense storage */ - } /* if using shared attributes */ - -#if 0 /* Native VOL connector only supports large attributes with latest format */ - test_attr_delete_last_dense(my_fcpl, my_fapl); - - /* test_attr_bug7 is specific to the "new" object header format, - * and in fact fails if used with the old format due to the - * attributes being larger than 64K */ - test_attr_bug7(my_fcpl, - my_fapl); /* Test creating and deleting large attributes in ohdr chunk 0 */ -#endif - - } /* if using "new format" */ - } /* for unshared/shared attributes */ - } /* for old/new format */ - - if (minimize_dset_oh != 0) { - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - dcpl_g = H5P_DEFAULT; - } - - } /* for default/minimized dataset object headers */ - - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close FCPLs */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fcpl2); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close FAPLs */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl2); - CHECK(ret, FAIL, "H5Pclose"); -} /* test_attr() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_attr - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_attr(void) -{ - H5Fdelete(FILENAME, H5P_DEFAULT); -} diff --git a/test/API/tchecksum.c b/test/API/tchecksum.c deleted file mode 100644 index 62db33c1fba..00000000000 --- a/test/API/tchecksum.c +++ /dev/null @@ -1,246 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*------------------------------------------------------------------------- - * - * Created: tchecksum.c - * - * Purpose: Test internal checksum routine(s) - * - *------------------------------------------------------------------------- - */ - -/***********/ -/* Headers */ -/***********/ -#include "testhdf5.h" - -/**********/ -/* Macros */ -/**********/ -#define BUF_LEN 3093 /* No particular value */ - -/*******************/ -/* Local variables */ -/*******************/ - -/**************************************************************** -** -** test_chksum_size_one(): Checksum 1 byte buffer -** -****************************************************************/ -static void -test_chksum_size_one(void) -{ - uint8_t buf[1] = {23}; /* Buffer to checksum */ - uint32_t chksum; /* Checksum value */ - - /* Buffer w/real data */ - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0x17001700, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xfa2568b7, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0xa209c931, "H5_checksum_lookup3"); - - /* Buffer w/zero(s) for data */ - memset(buf, 0, sizeof(buf)); - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xfa60fb57, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0x8ba9414b, "H5_checksum_lookup3"); -} /* test_chksum_size_one() */ - -/**************************************************************** -** -** test_chksum_size_two(): Checksum 2 byte buffer -** -****************************************************************/ -static void -test_chksum_size_two(void) -{ - uint8_t buf[2] = {23, 187}; /* Buffer to checksum */ - uint32_t chksum; /* Checksum value */ - - /* Buffer w/real data */ - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0x17bb17bb, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xfc856608, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0x8ba7a6c9, "H5_checksum_lookup3"); - - /* Buffer w/zero(s) for data */ - memset(buf, 0, sizeof(buf)); - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xfc7e9b20, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0x62cd61b3, "H5_checksum_lookup3"); -} /* test_chksum_size_two() */ - -/**************************************************************** -** -** test_chksum_size_three(): Checksum 3 byte buffer -** -****************************************************************/ -static void -test_chksum_size_three(void) -{ - uint8_t buf[3] = {23, 187, 98}; /* Buffer to checksum */ - uint32_t chksum; /* Checksum value */ - - /* Buffer w/real data */ - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0x917679bb, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xfebc5d70, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0xcebdf4f0, "H5_checksum_lookup3"); - - /* Buffer w/zero(s) for data */ - memset(buf, 0, sizeof(buf)); - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0, 
"H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xf9cc4c7a, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0x6bd0060f, "H5_checksum_lookup3"); -} /* test_chksum_size_three() */ - -/**************************************************************** -** -** test_chksum_size_four(): Checksum 4 byte buffer -** -****************************************************************/ -static void -test_chksum_size_four(void) -{ - uint8_t buf[4] = {23, 187, 98, 217}; /* Buffer to checksum */ - uint32_t chksum; /* Checksum value */ - - /* Buffer w/real data */ - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0x924f7a94, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xff398a46, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0x2c88bb51, "H5_checksum_lookup3"); - - /* Buffer w/zero(s) for data */ - memset(buf, 0, sizeof(buf)); - chksum = H5_checksum_fletcher32(buf, sizeof(buf)); - VERIFY(chksum, 0, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(buf, sizeof(buf)); - VERIFY(chksum, 0xff117081, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(buf, sizeof(buf), 0); - VERIFY(chksum, 0x049396b8, "H5_checksum_lookup3"); -} /* test_chksum_size_four() */ - -/**************************************************************** -** -** test_chksum_large(): Checksum larger buffer -** -****************************************************************/ -static void -test_chksum_large(void) -{ - uint8_t *large_buf; /* Buffer for checksum calculations */ - uint32_t chksum; /* Checksum value */ - size_t u; /* Local index variable */ - - /* Allocate the buffer */ - large_buf = (uint8_t *)malloc((size_t)BUF_LEN); - CHECK_PTR(large_buf, "malloc"); - - /* Initialize buffer w/known data */ - for (u = 0; u < BUF_LEN; u++) - large_buf[u] = (uint8_t)(u * 3); - - /* Buffer w/real data */ - chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN); - VERIFY(chksum, 0x85b4e2a, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN); - VERIFY(chksum, 0xfbd0f7c0, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0); - VERIFY(chksum, 0x1bd2ee7b, "H5_checksum_lookup3"); - - /* Buffer w/zero(s) for data */ - memset(large_buf, 0, (size_t)BUF_LEN); - chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN); - VERIFY(chksum, 0, "H5_checksum_fletcher32"); - - chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN); - VERIFY(chksum, 0xfac8b4c4, "H5_checksum_crc"); - - chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0); - VERIFY(chksum, 0x930c7afc, "H5_checksum_lookup3"); - - /* Release memory for buffer */ - free(large_buf); -} /* test_chksum_large() */ - -/**************************************************************** -** -** test_checksum(): Main checksum testing routine. 
-** -****************************************************************/ -void -test_checksum(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing checksum algorithms\n")); - - /* Various checks for fletcher32 checksum algorithm */ - test_chksum_size_one(); /* Test buffer w/only 1 byte */ - test_chksum_size_two(); /* Test buffer w/only 2 bytes */ - test_chksum_size_three(); /* Test buffer w/only 3 bytes */ - test_chksum_size_four(); /* Test buffer w/only 4 bytes */ - test_chksum_large(); /* Test buffer w/larger # of bytes */ - -} /* test_checksum() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_checksum - * - * Purpose: Cleanup temporary test files - * - * Return: none - * - *------------------------------------------------------------------------- - */ -void -cleanup_checksum(void) -{ - /* no file to clean */ -} diff --git a/test/API/tconfig.c b/test/API/tconfig.c deleted file mode 100644 index 3d495246239..00000000000 --- a/test/API/tconfig.c +++ /dev/null @@ -1,181 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tconfig - * - * Test the definitions in the H5config.h as much as possible - * - *************************************************************/ - -#include "testhdf5.h" - -/* macros definitions */ -/* verify C int type: verify the size of signed and unsigned int type - * with the macro size. - */ -#define vrfy_cint_type(ctype, uctype, ctype_macro) \ - do { \ - /* check signed type size */ \ - vrfy_macrosize(ctype, ctype_macro, #ctype_macro); \ - /* check unsigned type size */ \ - vrfy_macrosize(uctype, ctype_macro, #ctype_macro); \ - } while (0) - -/* verify C type sizes: verify the sizeof type with the macro size. */ -#define vrfy_ctype(type, macro) \ - do { \ - vrfy_macrosize(type, macro, #macro); \ - } while (0) - -/* verify if the sizeof(type) matches size defined in macro. */ -/* Needs this extra step so that we can print the macro name. 
*/ -#define vrfy_macrosize(type, macro, macroname) \ - if (sizeof(type) != (macro)) \ - TestErrPrintf("Error: sizeof(%s) is %zu but %s is %d\n", #type, sizeof(type), macroname, \ - (int)(macro)); - -/* local routine prototypes */ -void test_config_ctypes(void); -void test_exit_definitions(void); - -/*------------------------------------------------------------------------- - * Function: test_configure - * - * Purpose: Main configure definitions testing routine - * - * Return: none (error is fed back via global variable num_errs) - * - *------------------------------------------------------------------------- - */ -void -test_configure(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing configure definitions\n")); - test_config_ctypes(); - test_exit_definitions(); -} - -/*------------------------------------------------------------------------- - * Function: cleanup_configure - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_configure(void) -{ - /* no file to clean */ -} - -/*------------------------------------------------------------------------- - * Function: test_config_ctypes - * - * Purpose: test C language data type sizes - * - * Return: none (error is fed back via global variable num_errs) - *------------------------------------------------------------------------- - */ -void -test_config_ctypes(void) -{ - /* standard C89 basic types */ - /* char, signed char, unsigned char are three distinct types. */ - vrfy_ctype(char, H5_SIZEOF_CHAR); - vrfy_cint_type(signed char, unsigned char, H5_SIZEOF_CHAR); - vrfy_cint_type(int, unsigned int, H5_SIZEOF_INT); - vrfy_cint_type(short, unsigned short, H5_SIZEOF_SHORT); - vrfy_cint_type(long, unsigned long, H5_SIZEOF_LONG); - vrfy_ctype(float, H5_SIZEOF_FLOAT); - vrfy_ctype(double, H5_SIZEOF_DOUBLE); - vrfy_ctype(long double, H5_SIZEOF_LONG_DOUBLE); - - /* standard C99 basic types */ - vrfy_cint_type(long long, unsigned long long, H5_SIZEOF_LONG_LONG); - vrfy_cint_type(int8_t, uint8_t, H5_SIZEOF_INT8_T); - vrfy_cint_type(int16_t, uint16_t, H5_SIZEOF_INT16_T); - vrfy_cint_type(int32_t, uint32_t, H5_SIZEOF_INT32_T); - vrfy_cint_type(int64_t, uint64_t, H5_SIZEOF_INT64_T); - - /* Some vendors have different sizes for the signed and unsigned */ - /* fast8_t. Need to check them individually. 
*/ -#if H5_SIZEOF_INT_FAST8_T > 0 - vrfy_ctype(int_fast8_t, H5_SIZEOF_INT_FAST8_T); -#endif - -#if H5_SIZEOF_UINT_FAST8_T > 0 - vrfy_ctype(uint_fast8_t, H5_SIZEOF_UINT_FAST8_T); -#endif - -#if H5_SIZEOF_INT_FAST16_T > 0 - vrfy_cint_type(int_fast16_t, uint_fast16_t, H5_SIZEOF_INT_FAST16_T); -#endif - -#if H5_SIZEOF_INT_FAST32_T > 0 - vrfy_cint_type(int_fast32_t, uint_fast32_t, H5_SIZEOF_INT_FAST32_T); -#endif - -#if H5_SIZEOF_INT_FAST64_T > 0 - vrfy_cint_type(int_fast64_t, uint_fast64_t, H5_SIZEOF_INT_FAST64_T); -#endif - -#if H5_SIZEOF_INT_LEAST8_T > 0 - vrfy_cint_type(int_least8_t, uint_least8_t, H5_SIZEOF_INT_LEAST8_T); -#endif - -#if H5_SIZEOF_INT_LEAST16_T > 0 - vrfy_cint_type(int_least16_t, uint_least16_t, H5_SIZEOF_INT_LEAST16_T); -#endif - -#if H5_SIZEOF_INT_LEAST32_T > 0 - vrfy_cint_type(int_least32_t, uint_least32_t, H5_SIZEOF_INT_LEAST32_T); -#endif - -#if H5_SIZEOF_INT_LEAST64_T > 0 - vrfy_cint_type(int_least64_t, uint_least64_t, H5_SIZEOF_INT_LEAST64_T); -#endif - -#if H5_SIZEOF_OFF_T > 0 - vrfy_ctype(off_t, H5_SIZEOF_OFF_T); -#endif - -#if H5_SIZEOF_SIZE_T > 0 - vrfy_ctype(size_t, H5_SIZEOF_SIZE_T); -#endif - -#if H5_SIZEOF_SSIZE_T > 0 - vrfy_ctype(ssize_t, H5_SIZEOF_SSIZE_T); -#endif -} - -/*------------------------------------------------------------------------- - * Function: test_exit_definitions - * - * Purpose: test the exit macros values - * - * Return: none (error is fed back via global variable num_errs) - *------------------------------------------------------------------------- - */ -void -test_exit_definitions(void) -{ - /* Verify the EXIT_SUCCESS and EXIT_FAILURE are 0 and 1 respectively. */ - /* This should be true for POSIX compliant systems. */ - if (EXIT_SUCCESS != 0) - TestErrPrintf("Error: EXIT_SUCCESS is %d, should be %d\n", EXIT_SUCCESS, 0); - if (EXIT_FAILURE != 1) - TestErrPrintf("Error: EXIT_FAILURE is %d, should be %d\n", EXIT_FAILURE, 1); -} diff --git a/test/API/tcoords.c b/test/API/tcoords.c deleted file mode 100644 index 5599def3210..00000000000 --- a/test/API/tcoords.c +++ /dev/null @@ -1,718 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tcoords - * - * Test the element coordinates for dataspace selection. For - * chunked dataset, when the hyperslab selection of some - * dimensions is full, the library optimize it by "flattening" - * the fully selected dimensions. This program tests if the - * coordinates of selected elements are correctly calculated. - * - *************************************************************/ - -#include "testhdf5.h" - -#define FILENAME "coord.h5" - -#define SINGLE_END_DSET "single_end_dset" -#define MULTI_ENDS_SEL_HYPER_DSET "multiple_ends_dset" - -#define NAME_LEN 128 - -/* Data written to the dataset for single block test. Global variable - * for convenience. 
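
The "flattening" optimization described in the header above applies when a hyperslab covers entire dimensions. A minimal sketch, assuming a 10 x 20 x 30 dataspace created elsewhere with H5Screate_simple (helper name and extents are hypothetical), of a selection whose two fastest dimensions are selected in full and can therefore be treated as one contiguous run:

#include "hdf5.h"

/* Select one full 20 x 30 plane (all of dims 1 and 2) at index 4 of dim 0.
 * Because dims 1 and 2 are fully selected, the selection is a single
 * contiguous run and is a candidate for internal flattening. */
static herr_t
select_full_plane(hid_t fspace)
{
    hsize_t start[3] = {4, 0, 0};
    hsize_t count[3] = {1, 1, 1};
    hsize_t block[3] = {1, 20, 30};

    /* A NULL stride means a stride of 1 in every dimension */
    return H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, block);
}
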
*/ -int da_buffer[2][3][6][2]; - -/*********************************************************** -** -** test_singleEnd_selElements(): Test element selection of only -** one block. -** -*************************************************************/ -static void -test_singleEnd_selElements(hid_t file, bool is_chunked) -{ - hid_t sid, plid, did, msid; - char dset_name[NAME_LEN]; /* Dataset name */ - size_t elmts_numb; - herr_t ret; /* Generic error return */ - int i, j, k; - hsize_t da_dims[4] = {2, 3, 6, 2}; - hsize_t da_chunksize[4] = {1, 3, 3, 2}; - - /* For testing the full selection in the fastest-growing end */ - int mem1_buffer[1][1][6][2]; - hsize_t mem1_dims[4] = {1, 1, 6, 2}; - hsize_t da_elmts1[12][4] = {{0, 0, 0, 0}, {0, 0, 0, 1}, {0, 0, 1, 0}, {0, 0, 1, 1}, - {0, 0, 2, 0}, {0, 0, 2, 1}, {0, 0, 3, 0}, {0, 0, 3, 1}, - {0, 0, 4, 0}, {0, 0, 4, 1}, {0, 0, 5, 0}, {0, 0, 5, 1}}; - - /* For testing the full selection in the slowest-growing end */ - int mem2_buffer[2][3][1][1]; - hsize_t mem2_dims[4] = {2, 3, 1, 1}; - hsize_t da_elmts2[6][4] = {{0, 0, 0, 0}, {0, 1, 0, 0}, {0, 2, 0, 0}, - {1, 0, 0, 0}, {1, 1, 0, 0}, {1, 2, 0, 0}}; - - /* For testing the full selection in the middle dimensions */ - int mem3_buffer[1][3][6][1]; - hsize_t mem3_dims[4] = {1, 3, 6, 1}; - hsize_t da_elmts3[18][4] = {{0, 0, 0, 0}, {0, 0, 1, 0}, {0, 0, 2, 0}, {0, 0, 3, 0}, {0, 0, 4, 0}, - {0, 0, 5, 0}, {0, 1, 0, 0}, {0, 1, 1, 0}, {0, 1, 2, 0}, {0, 1, 3, 0}, - {0, 1, 4, 0}, {0, 1, 5, 0}, {0, 2, 0, 0}, {0, 2, 1, 0}, {0, 2, 2, 0}, - {0, 2, 3, 0}, {0, 2, 4, 0}, {0, 2, 5, 0}}; - - /* Create and write the dataset */ - sid = H5Screate_simple(4, da_dims, da_dims); - CHECK(sid, FAIL, "H5Screate_simple"); - - plid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plid, FAIL, "H5Pcreate"); - - if (is_chunked) { - ret = H5Pset_chunk(plid, 4, da_chunksize); - CHECK(ret, FAIL, "H5Pset_chunk"); - } - - /* Construct dataset's name */ - memset(dset_name, 0, (size_t)NAME_LEN); - strcat(dset_name, SINGLE_END_DSET); - if (is_chunked) - strcat(dset_name, "_chunked"); - - did = H5Dcreate2(file, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, plid, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Initialize the data to be written to file */ - for (i = 0; i < 2; i++) { - for (j = 0; j < 3; j++) { - for (k = 0; k < 6; k++) { - da_buffer[i][j][k][0] = i * 100 + j * 10 + k; - da_buffer[i][j][k][1] = i * 100 + j * 10 + k + 1; - } - } - } - - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, da_buffer); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* ****** Case 1: ****** - * Testing the full selection in the fastest-growing end */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - elmts_numb = 12; - - ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Dataspace for memory buffer */ - msid = H5Screate_simple(4, mem1_dims, mem1_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 6; i++) - for (j = 0; j < 2; j++) - if (da_buffer[0][0][i][j] != mem1_buffer[0][0][i][j]) { - TestErrPrintf("%u: Read different values than written at index 
0,0,%d,%d\n", __LINE__, i, j); - } - - /* ****** Case 2: ****** - * Testing the full selection in the slowest-growing end */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - elmts_numb = 6; - - ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts2); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Dataspace for memory buffer */ - msid = H5Screate_simple(4, mem2_dims, mem2_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 2; i++) - for (j = 0; j < 3; j++) - if (da_buffer[i][j][0][0] != mem2_buffer[i][j][0][0]) { - TestErrPrintf("%u: Read different values than written at index %d,%d,0,0, da_buffer = %d, " - "mem2_buffer = %d\n", - __LINE__, i, j, da_buffer[i][j][0][0], mem2_buffer[i][j][0][0]); - } - - /* ****** Case 3: ****** - * Testing the full selection in the middle dimensions */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - elmts_numb = 18; - - ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts3); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Dataspace for memory buffer */ - msid = H5Screate_simple(4, mem3_dims, mem3_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 3; i++) - for (j = 0; j < 6; j++) - if (da_buffer[0][i][j][0] != mem3_buffer[0][i][j][0]) { - TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0\n", __LINE__, i, j); - } - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Pclose(plid); - CHECK(ret, FAIL, "H5Pclose"); -} - -/*********************************************************** -** -** test_singleEnd_selHyperslab(): Test full hyperslab selection -** of only one block. 
-** -*************************************************************/ -static void -test_singleEnd_selHyperslab(hid_t file, bool is_chunked) -{ - hid_t sid, did, msid; - char dset_name[NAME_LEN]; /* Dataset name */ - herr_t ret; /* Generic error return */ - int i, j; - hsize_t da_dims[4] = {2, 3, 6, 2}; - - /* For testing the full selection in the fastest-growing end */ - int mem1_buffer[1][1][6][2]; - hsize_t mem1_dims[4] = {1, 1, 6, 2}; - hsize_t mem1_start[4] = {0, 0, 0, 0}; - hsize_t mem1_count[4] = {1, 1, 1, 1}; - hsize_t mem1_stride[4] = {1, 1, 1, 1}; - hsize_t mem1_block[4] = {1, 1, 6, 2}; - - /* For testing the full selection in the slowest-growing end */ - int mem2_buffer[2][3][1][1]; - hsize_t mem2_dims[4] = {2, 3, 1, 1}; - hsize_t mem2_start[4] = {0, 0, 0, 0}; - hsize_t mem2_count[4] = {1, 1, 1, 1}; - hsize_t mem2_stride[4] = {1, 1, 1, 1}; - hsize_t mem2_block[4] = {2, 3, 1, 1}; - - /* For testing the full selection in the middle dimensions */ - int mem3_buffer[1][3][6][1]; - hsize_t mem3_dims[4] = {1, 3, 6, 1}; - hsize_t mem3_start[4] = {0, 0, 0, 0}; - hsize_t mem3_count[4] = {1, 1, 1, 1}; - hsize_t mem3_stride[4] = {1, 1, 1, 1}; - hsize_t mem3_block[4] = {1, 3, 6, 1}; - - /* Construct dataset's name */ - memset(dset_name, 0, NAME_LEN); - strcat(dset_name, SINGLE_END_DSET); - if (is_chunked) - strcat(dset_name, "_chunked"); - - /* Dataspace for the dataset in file */ - sid = H5Screate_simple(4, da_dims, da_dims); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* ****** Case 1: ****** - * Testing the full selection in the fastest-growing end */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem1_start, mem1_stride, mem1_count, mem1_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Dataspace for memory buffer */ - msid = H5Screate_simple(4, mem1_dims, mem1_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 6; i++) - for (j = 0; j < 2; j++) - if (da_buffer[0][0][i][j] != mem1_buffer[0][0][i][j]) { - TestErrPrintf("%u: Read different values than written at index 0,0,%d,%d\n", __LINE__, i, j); - } - - /* ****** Case 2: ****** - * Testing the full selection in the slowest-growing end */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem2_start, mem2_stride, mem2_count, mem2_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Dataspace for memory buffer */ - msid = H5Screate_simple(4, mem2_dims, mem2_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 2; i++) - for (j = 0; j < 3; j++) - if (da_buffer[i][j][0][0] != mem2_buffer[i][j][0][0]) { - TestErrPrintf("%u: Read different values than written at index %d,%d,0,0\n", __LINE__, i, j); - } - - /* ****** Case 3: ****** - * Testing the full selection in the middle dimensions 
*/ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem3_start, mem3_stride, mem3_count, mem3_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Dataspace for memory buffer */ - msid = H5Screate_simple(4, mem3_dims, mem3_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 3; i++) - for (j = 0; j < 6; j++) - if (da_buffer[0][i][j][0] != mem3_buffer[0][i][j][0]) { - TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0\n", __LINE__, i, j); - } - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} - -/*********************************************************** -** -** test_multiple_end(): Test full hyperslab selection of -** multiple blocks. -** -*************************************************************/ -static void -test_multiple_ends(hid_t file, bool is_chunked) -{ - hid_t sid, plid, did, msid; - char dset_name[NAME_LEN]; /* Dataset name */ - herr_t ret; /* Generic error return */ - int i, j, k, l, m, n, p; - hsize_t da_dims[8] = {4, 5, 3, 4, 2, 3, 6, 2}; - hsize_t da_chunksize[8] = {1, 5, 3, 2, 2, 3, 3, 2}; - struct { - int arr[4][5][3][4][2][3][6][2]; - } *data_buf = NULL; - - /* For testing the full selections in the fastest-growing end and in the middle dimensions */ - struct { - int arr[1][1][1][4][2][1][6][2]; - } *mem1_buffer = NULL; - hsize_t mem1_dims[8] = {1, 1, 1, 4, 2, 1, 6, 2}; - hsize_t mem1_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; - hsize_t mem1_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem1_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem1_block[8] = {1, 1, 1, 4, 2, 1, 6, 2}; - - /* For testing the full selections in the slowest-growing end and in the middle dimensions */ - struct { - int arr[4][5][1][4][2][1][1][1]; - } *mem2_buffer = NULL; - hsize_t mem2_dims[8] = {4, 5, 1, 4, 2, 1, 1, 1}; - hsize_t mem2_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; - hsize_t mem2_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem2_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem2_block[8] = {4, 5, 1, 4, 2, 1, 1, 1}; - - /* For testing two unadjacent full selections in the middle dimensions */ - struct { - int arr[1][5][3][1][1][3][6][1]; - } *mem3_buffer = NULL; - hsize_t mem3_dims[8] = {1, 5, 3, 1, 1, 3, 6, 1}; - hsize_t mem3_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; - hsize_t mem3_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem3_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem3_block[8] = {1, 5, 3, 1, 1, 3, 6, 1}; - - /* For testing the full selections in the fastest-growing end and the slowest-growing end */ - struct { - int arr[4][5][1][1][1][1][6][2]; - } *mem4_buffer = NULL; - hsize_t mem4_dims[8] = {4, 5, 1, 1, 1, 1, 6, 2}; - hsize_t mem4_start[8] = {0, 0, 0, 0, 0, 0, 0, 0}; - hsize_t mem4_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem4_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem4_block[8] = {4, 5, 1, 1, 1, 1, 6, 2}; - - /* For testing the full selections in the fastest-growing end and slowest-growing end, - * also in the middle dimensions */ - struct { - int arr[4][5][1][4][2][1][6][2]; - } *mem5_buffer = NULL; - hsize_t mem5_dims[8] = {4, 5, 1, 4, 2, 1, 6, 2}; - hsize_t mem5_start[8] = {0, 0, 0, 0, 0, 0, 0, 
0}; - hsize_t mem5_count[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem5_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1}; - hsize_t mem5_block[8] = {4, 5, 1, 4, 2, 1, 6, 2}; - - /* Initialize dynamic arrays */ - data_buf = calloc(1, sizeof(*data_buf)); - CHECK_PTR(data_buf, "calloc"); - mem1_buffer = calloc(1, sizeof(*mem1_buffer)); - CHECK_PTR(data_buf, "calloc"); - mem2_buffer = calloc(1, sizeof(*mem2_buffer)); - CHECK_PTR(data_buf, "calloc"); - mem3_buffer = calloc(1, sizeof(*mem3_buffer)); - CHECK_PTR(data_buf, "calloc"); - mem4_buffer = calloc(1, sizeof(*mem4_buffer)); - CHECK_PTR(data_buf, "calloc"); - mem5_buffer = calloc(1, sizeof(*mem5_buffer)); - CHECK_PTR(data_buf, "calloc"); - - /* Create and write the dataset */ - sid = H5Screate_simple(8, da_dims, da_dims); - CHECK(sid, FAIL, "H5Screate_simple"); - - plid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plid, FAIL, "H5Pcreate"); - - if (is_chunked) { - ret = H5Pset_chunk(plid, 8, da_chunksize); - CHECK(ret, FAIL, "H5Pset_chunk"); - } - - /* Construct dataset's name */ - memset(dset_name, 0, NAME_LEN); - strcat(dset_name, MULTI_ENDS_SEL_HYPER_DSET); - if (is_chunked) - strcat(dset_name, "_chunked"); - - did = H5Dcreate2(file, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, plid, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - for (i = 0; i < 4; i++) - for (j = 0; j < 5; j++) - for (k = 0; k < 3; k++) - for (l = 0; l < 4; l++) - for (m = 0; m < 2; m++) - for (n = 0; n < 3; n++) - for (p = 0; p < 6; p++) { - data_buf->arr[i][j][k][l][m][n][p][0] = - i * 1000000 + j * 100000 + k * 10000 + l * 1000 + m * 100 + n * 10 + p; - data_buf->arr[i][j][k][l][m][n][p][1] = i * 1000000 + j * 100000 + k * 10000 + - l * 1000 + m * 100 + n * 10 + p + 1; - } - - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* ****** Case 1: ****** - * Testing the full selections in the fastest-growing end and in the middle dimensions*/ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem1_start, mem1_stride, mem1_count, mem1_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - msid = H5Screate_simple(8, mem1_dims, mem1_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 4; i++) - for (j = 0; j < 2; j++) - for (k = 0; k < 6; k++) - for (l = 0; l < 2; l++) - if (data_buf->arr[0][0][0][i][j][0][k][l] != mem1_buffer->arr[0][0][0][i][j][0][k][l]) { - TestErrPrintf("%u: Read different values than written at index 0,0,0,%d,%d,0,%d,%d\n", - __LINE__, i, j, k, l); - } - - /* ****** Case 2: ****** - * Testing the full selections in the slowest-growing end and in the middle dimensions*/ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem2_start, mem2_stride, mem2_count, mem2_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - msid = H5Screate_simple(8, mem2_dims, mem2_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, 
msid, sid, H5P_DEFAULT, mem2_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 4; i++) - for (j = 0; j < 5; j++) - for (k = 0; k < 4; k++) - for (l = 0; l < 2; l++) - if (data_buf->arr[i][j][0][k][l][0][0][0] != mem2_buffer->arr[i][j][0][k][l][0][0][0]) { - TestErrPrintf("%u: Read different values than written at index %d,%d,0,%d,%d,0,0,0\n", - __LINE__, i, j, k, l); - } - - /* ****** Case 3: ****** - * Testing two unadjacent full selections in the middle dimensions */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem3_start, mem3_stride, mem3_count, mem3_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - msid = H5Screate_simple(8, mem3_dims, mem3_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 5; i++) - for (j = 0; j < 3; j++) - for (k = 0; k < 3; k++) - for (l = 0; l < 6; l++) - if (data_buf->arr[0][i][j][0][0][k][l][0] != mem3_buffer->arr[0][i][j][0][0][k][l][0]) { - TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0,0,%d,%d,0\n", - __LINE__, i, j, k, l); - } - - /* ****** Case 4: ****** - * Testing the full selections in the fastest-growing end and the slowest-growing end */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem4_start, mem4_stride, mem4_count, mem4_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - msid = H5Screate_simple(8, mem4_dims, mem4_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem4_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 4; i++) - for (j = 0; j < 5; j++) - for (k = 0; k < 6; k++) - for (l = 0; l < 2; l++) - if (data_buf->arr[i][j][0][0][0][0][k][l] != mem4_buffer->arr[i][j][0][0][0][0][k][l]) { - TestErrPrintf("%u: Read different values than written at index %d,%d,0,0,0,0,%d,%d\n", - __LINE__, i, j, k, l); - } - - /* ****** Case 5: ****** - * Testing the full selections in the fastest-growing end and the slowest-growing end, - * and also in the middle dimensions */ - did = H5Dopen2(file, dset_name, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Select the elements in the dataset */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem5_start, mem5_stride, mem5_count, mem5_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - msid = H5Screate_simple(8, mem5_dims, mem5_dims); - CHECK(msid, FAIL, "H5Screate_simple"); - - ret = H5Sselect_all(msid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem5_buffer); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - for (i = 0; i < 4; i++) - for (j = 0; j < 5; j++) - for (k = 0; k < 4; k++) - for (l 
= 0; l < 2; l++) - for (m = 0; m < 6; m++) - for (n = 0; n < 2; n++) - if (data_buf->arr[i][j][0][k][l][0][m][n] != - mem5_buffer->arr[i][j][0][k][l][0][m][n]) { - TestErrPrintf( - "%u: Read different values than written at index %d,%d,0,%d,%d,0,%d,%d\n", - __LINE__, i, j, k, l, m, n); - } - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Pclose(plid); - CHECK(ret, FAIL, "H5Pclose"); - - free(data_buf); - free(mem1_buffer); - free(mem2_buffer); - free(mem3_buffer); - free(mem4_buffer); - free(mem5_buffer); -} - -/**************************************************************** -** -** test_coords(): Main testing routine. -** -****************************************************************/ -void -test_coords(void) -{ - hid_t fid; - bool is_chunk[2] = {true, false}; - int i; - herr_t ret; /* Generic error return */ - - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - for (i = 0; i < 2; i++) { - /* Test H5Sselect_elements with selection of one block of data */ - test_singleEnd_selElements(fid, is_chunk[i]); - - /* Test H5Sselect_hyperslab with selection of one block of data */ - test_singleEnd_selHyperslab(fid, is_chunk[i]); - - /* Test H5Sselect_hyperslab with selection of multiple blocks of data */ - test_multiple_ends(fid, is_chunk[i]); - } - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} - -/*------------------------------------------------------------------------- - * Function: cleanup_coords - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_coords(void) -{ - H5Fdelete(FILENAME, H5P_DEFAULT); -} diff --git a/test/API/testhdf5.c b/test/API/testhdf5.c deleted file mode 100644 index ca5d0e41eac..00000000000 --- a/test/API/testhdf5.c +++ /dev/null @@ -1,716 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - FILE - testhdf5.c - HDF5 testing framework main file. - - REMARKS - General test wrapper for HDF5 base library test programs - - DESIGN - Each test function should be implemented as function having no - parameters and returning void (i.e. no return value). They should be put - into the list of AddTest() calls in main() below. Functions which depend - on other functionality should be placed below the AddTest() call for the - base functionality testing. - Each test module should include testhdf5.h and define a unique set of - names for test files they create. 
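
A minimal sketch of the registration convention this DESIGN note describes (the test, cleanup, and file names are hypothetical; in this API-test variant of the driver the AddTest() calls are commented out and the test functions are invoked directly, as main() below shows):

/* A test module exposes a parameterless test function and an optional
 * cleanup function, then is registered once from main(). */
void
test_example(void)
{
    MESSAGE(5, ("Testing example feature\n"));
    /* ... exercise the feature; report failures with TestErrPrintf() ... */
}

void
cleanup_example(void)
{
    H5Fdelete("example.h5", H5P_DEFAULT); /* unique file name per module */
}

/* In main():
 *     AddTest("example", test_example, cleanup_example, "Example feature", NULL);
 */
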
- - BUGS/LIMITATIONS - - - */ - -/* ANY new test needs to have a prototype in testhdf5.h */ -#include "testhdf5.h" - -int nerrors = 0; - -char *paraprefix = NULL; /* for command line option para-prefix */ - -/* Length of multi-file VFD filename buffers */ -#define H5TEST_MULTI_FILENAME_LEN 1024 - -uint64_t vol_cap_flags_g = H5VL_CAP_FLAG_NONE; - -/* - * This routine is designed to provide equivalent functionality to 'printf' - * and allow easy replacement for environments which don't have stdin/stdout - * available. (i.e. Windows & the Mac) - */ -H5_ATTR_FORMAT(printf, 1, 2) -int -print_func(const char *format, ...) -{ - va_list arglist; - int ret_value; - - va_start(arglist, format); - ret_value = vprintf(format, arglist); - va_end(arglist); - return ret_value; -} - -/* - * This routine is designed to provide equivalent functionality to 'printf' - * and also increment the error count for the testing framework. - */ -int -TestErrPrintf(const char *format, ...) -{ - va_list arglist; - int ret_value; - - /* Increment the error count */ - nerrors++; - - /* Print the requested information */ - va_start(arglist, format); - ret_value = vprintf(format, arglist); - va_end(arglist); - - /* Return the length of the string produced (like printf() does) */ - return ret_value; -} - -#ifdef H5_HAVE_PARALLEL -/*------------------------------------------------------------------------- - * Function: getenv_all - * - * Purpose: Used to get the environment that the root MPI task has. - * name specifies which environment variable to look for - * val is the string to which the value of that environment - * variable will be copied. - * - * NOTE: The pointer returned by this function is only - * valid until the next call to getenv_all and the data - * stored there must be copied somewhere else before any - * further calls to getenv_all take place. - * - * Return: pointer to a string containing the value of the environment variable - * NULL if the variable doesn't exist in task 'root's environment. 
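
A short usage sketch of the semantics just described (buffer size hypothetical; valid only in an H5_HAVE_PARALLEL build after MPI_Init): every rank receives the value that the root rank sees, and the returned storage is reused on the next call, so it should be copied before getenv_all() is called again.

/* Inside a parallel-only code path */
char  prefix_copy[256] = "";
char *prefix_env       = getenv_all(MPI_COMM_WORLD, 0, "HDF5_PARAPREFIX");

if (prefix_env) {
    strncpy(prefix_copy, prefix_env, sizeof(prefix_copy) - 1);
    prefix_copy[sizeof(prefix_copy) - 1] = '\0';
}
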
- *------------------------------------------------------------------------- - */ -char * -getenv_all(MPI_Comm comm, int root, const char *name) -{ - int mpi_size, mpi_rank, mpi_initialized, mpi_finalized; - int len; - static char *env = NULL; - - assert(name); - - MPI_Initialized(&mpi_initialized); - MPI_Finalized(&mpi_finalized); - - if (mpi_initialized && !mpi_finalized) { - MPI_Comm_rank(comm, &mpi_rank); - MPI_Comm_size(comm, &mpi_size); - assert(root < mpi_size); - - /* The root task does the getenv call - * and sends the result to the other tasks */ - if (mpi_rank == root) { - env = getenv(name); - if (env) { - len = (int)strlen(env); - MPI_Bcast(&len, 1, MPI_INT, root, comm); - MPI_Bcast(env, len, MPI_CHAR, root, comm); - } - else { - /* len -1 indicates that the variable was not in the environment */ - len = -1; - MPI_Bcast(&len, 1, MPI_INT, root, comm); - } - } - else { - MPI_Bcast(&len, 1, MPI_INT, root, comm); - if (len >= 0) { - if (env == NULL) - env = (char *)malloc((size_t)len + 1); - else if (strlen(env) < (size_t)len) - env = (char *)realloc(env, (size_t)len + 1); - - MPI_Bcast(env, len, MPI_CHAR, root, comm); - env[len] = '\0'; - } - else { - if (env) - free(env); - env = NULL; - } - } -#ifndef NDEBUG - MPI_Barrier(comm); -#endif - } - else { - /* use original getenv */ - if (env) - free(env); - env = getenv(name); - } /* end if */ - - return env; -} - -#endif - -/*------------------------------------------------------------------------- - * Function: h5_fileaccess - * - * Purpose: Returns a file access template which is the default template - * but with a file driver, VOL connector, or libver bound set - * according to a constant or environment variable - * - * Return: Success: A file access property list - * Failure: H5I_INVALID_HID - * - *------------------------------------------------------------------------- - */ -hid_t -h5_fileaccess(void) -{ - hid_t fapl_id = H5I_INVALID_HID; - - if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) - goto error; - - /* Finally, check for libver bounds */ - if (h5_get_libver_fapl(fapl_id) < 0) - goto error; - - return fapl_id; - -error: - if (fapl_id != H5I_INVALID_HID) - H5Pclose(fapl_id); - return H5I_INVALID_HID; -} /* end h5_fileaccess() */ - -/*------------------------------------------------------------------------- - * Function: h5_get_libver_fapl - * - * Purpose: Sets the library version bounds for a FAPL according to the - * value in the constant or environment variable "HDF5_LIBVER_BOUNDS". - * - * Return: Success: 0 - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -herr_t -h5_get_libver_fapl(hid_t fapl) -{ - const char *env = NULL; /* HDF5_DRIVER environment variable */ - const char *tok = NULL; /* strtok pointer */ - char *lasts = NULL; /* Context pointer for strtok_r() call */ - char buf[1024]; /* buffer for tokenizing HDF5_DRIVER */ - - /* Get the environment variable, if it exists */ - env = getenv("HDF5_LIBVER_BOUNDS"); -#ifdef HDF5_LIBVER_BOUNDS - /* Use the environment variable, then the compile-time constant */ - if (!env) - env = HDF5_LIBVER_BOUNDS; -#endif - - /* If the environment variable was not set, just return - * without modifying the FAPL. - */ - if (!env || !*env) - goto done; - - /* Get the first 'word' of the environment variable. - * If it's nothing (environment variable was whitespace) - * just return the default fapl. 
- */ - strncpy(buf, env, sizeof(buf)); - buf[sizeof(buf) - 1] = '\0'; - if (NULL == (tok = HDstrtok_r(buf, " \t\n\r", &lasts))) - goto done; - - if (!strcmp(tok, "latest")) { - /* use the latest format */ - if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) - goto error; - } /* end if */ - else { - /* Unknown setting */ - goto error; - } /* end else */ - -done: - return 0; - -error: - return -1; -} /* end h5_get_libver_fapl() */ - -#ifndef HDF5_PARAPREFIX -#define HDF5_PARAPREFIX "" -#endif -static char * -h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fullname, size_t size, - bool nest_printf, bool subst_for_superblock) -{ - const char *prefix = NULL; - const char *driver_env_var = NULL; /* HDF5_DRIVER environment variable */ - char *ptr, last = '\0'; - const char *suffix = _suffix; - size_t i, j; - hid_t driver = -1; - int isppdriver = 0; /* if the driver is MPI parallel */ - - if (!base_name || !fullname || size < 1) - return NULL; - - memset(fullname, 0, size); - - /* Determine if driver is set by environment variable. If it is, - * only generate a suffix if fixing the filename for the superblock - * file. */ - driver_env_var = getenv(HDF5_DRIVER); - if (driver_env_var && (H5P_DEFAULT == fapl) && subst_for_superblock) - fapl = H5P_FILE_ACCESS_DEFAULT; - - /* figure out the suffix */ - if (H5P_DEFAULT != fapl) { - if ((driver = H5Pget_driver(fapl)) < 0) - return NULL; - - if (suffix) { - if (H5FD_FAMILY == driver) { - if (subst_for_superblock) - suffix = "-000000.h5"; - else - suffix = nest_printf ? "-%%06d.h5" : "-%06d.h5"; - } - else if (H5FD_MULTI == driver) { - - /* Check the HDF5_DRIVER environment variable in case - * we are using the split driver since both of those - * use the multi VFD under the hood. - */ - if (driver_env_var && !strcmp(driver_env_var, "split")) { - /* split VFD */ - if (subst_for_superblock) - suffix = ".h5.meta"; - } - else { - /* multi VFD */ - if (subst_for_superblock) - suffix = "-s.h5"; - else - suffix = NULL; - } - } - } - } - - /* Must first check fapl is not H5P_DEFAULT (-1) because H5FD_XXX - * could be of value -1 if it is not defined. - */ - isppdriver = ((H5P_DEFAULT != fapl) || driver_env_var) && (H5FD_MPIO == driver); -#if 0 - /* Check HDF5_NOCLEANUP environment setting. - * (The #ifdef is needed to prevent compile failure in case MPI is not - * configured.) - */ - if (isppdriver) { -#ifdef H5_HAVE_PARALLEL - if (getenv_all(MPI_COMM_WORLD, 0, HDF5_NOCLEANUP)) - SetTestNoCleanup(); -#endif /* H5_HAVE_PARALLEL */ - } - else { - if (getenv(HDF5_NOCLEANUP)) - SetTestNoCleanup(); - } -#endif - /* Check what prefix to use for test files. Process HDF5_PARAPREFIX and - * HDF5_PREFIX. - * Use different ones depending on parallel or serial driver used. - * (The #ifdef is needed to prevent compile failure in case MPI is not - * configured.) - */ - if (isppdriver) { -#ifdef H5_HAVE_PARALLEL - /* - * For parallel: - * First use command line option, then the environment - * variable, then try the constant - */ - static int explained = 0; - - prefix = (paraprefix ? paraprefix : getenv_all(MPI_COMM_WORLD, 0, "HDF5_PARAPREFIX")); - - if (!prefix && !explained) { - /* print hint by process 0 once. */ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - if (mpi_rank == 0) - printf("*** Hint ***\n" - "You can use environment variable HDF5_PARAPREFIX to " - "run parallel test files in a\n" - "different directory or to add file type prefix. 
e.g.,\n" - " HDF5_PARAPREFIX=pfs:/PFS/user/me\n" - " export HDF5_PARAPREFIX\n" - "*** End of Hint ***\n"); - - explained = true; -#ifdef HDF5_PARAPREFIX - prefix = HDF5_PARAPREFIX; -#endif /* HDF5_PARAPREFIX */ - } -#endif /* H5_HAVE_PARALLEL */ - } - else { - /* - * For serial: - * First use the environment variable, then try the constant - */ - prefix = getenv("HDF5_PREFIX"); - -#ifdef HDF5_PREFIX - if (!prefix) - prefix = HDF5_PREFIX; -#endif /* HDF5_PREFIX */ - } - - /* Prepend the prefix value to the base name */ - if (prefix && *prefix) { - if (isppdriver) { - /* This is a parallel system */ - char *subdir; - - if (!strcmp(prefix, HDF5_PARAPREFIX)) { - /* - * If the prefix specifies the HDF5_PARAPREFIX directory, then - * default to using the "/tmp/$USER" or "/tmp/$LOGIN" - * directory instead. - */ - char *user, *login; - - user = getenv("USER"); - login = getenv("LOGIN"); - subdir = (user ? user : login); - - if (subdir) { - for (i = 0; i < size && prefix[i]; i++) - fullname[i] = prefix[i]; - - fullname[i++] = '/'; - - for (j = 0; i < size && subdir[j]; ++i, ++j) - fullname[i] = subdir[j]; - } - } - - if (!fullname[0]) { - /* We didn't append the prefix yet */ - strncpy(fullname, prefix, size); - fullname[size - 1] = '\0'; - } - - if (strlen(fullname) + strlen(base_name) + 1 < size) { - /* - * Append the base_name with a slash first. Multiple - * slashes are handled below. - */ - h5_stat_t buf; - - if (HDstat(fullname, &buf) < 0) - /* The directory doesn't exist just yet */ - if (HDmkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST) - /* - * We couldn't make the "/tmp/${USER,LOGIN}" - * subdirectory. Default to PREFIX's original - * prefix value. - */ - strcpy(fullname, prefix); - - strcat(fullname, "/"); - strcat(fullname, base_name); - } - else { - /* Buffer is too small */ - return NULL; - } - } - else { - if (snprintf(fullname, size, "%s/%s", prefix, base_name) == (int)size) - /* Buffer is too small */ - return NULL; - } - } - else if (strlen(base_name) >= size) { - /* Buffer is too small */ - return NULL; - } - else { - strcpy(fullname, base_name); - } - - /* Append a suffix */ - if (suffix) { - if (strlen(fullname) + strlen(suffix) >= size) - return NULL; - - strcat(fullname, suffix); - } - - /* Remove any double slashes in the filename */ - for (ptr = fullname, i = j = 0; ptr && i < size; i++, ptr++) { - if (*ptr != '/' || last != '/') - fullname[j++] = *ptr; - - last = *ptr; - } - - return fullname; -} - -char * -h5_fixname(const char *base_name, hid_t fapl, char *fullname, size_t size) -{ - return (h5_fixname_real(base_name, fapl, ".h5", fullname, size, false, false)); -} - -char * -h5_fixname_superblock(const char *base_name, hid_t fapl_id, char *fullname, size_t size) -{ - return (h5_fixname_real(base_name, fapl_id, ".h5", fullname, size, false, true)); -} - -bool -h5_using_default_driver(const char *drv_name) -{ - bool ret_val = true; - - assert(H5_DEFAULT_VFD == H5FD_SEC2); - - if (!drv_name) - drv_name = getenv(HDF5_DRIVER); - - if (drv_name) - return (!strcmp(drv_name, "sec2") || !strcmp(drv_name, "nomatch")); - - return ret_val; -} - -herr_t -h5_driver_is_default_vfd_compatible(hid_t fapl_id, bool *default_vfd_compatible) -{ - unsigned long feat_flags = 0; - hid_t driver_id = H5I_INVALID_HID; - herr_t ret_value = SUCCEED; - - assert(fapl_id >= 0); - assert(default_vfd_compatible); - - if (fapl_id == H5P_DEFAULT) - fapl_id = H5P_FILE_ACCESS_DEFAULT; - - if ((driver_id = H5Pget_driver(fapl_id)) < 0) - return FAIL; - - if (H5FDdriver_query(driver_id, &feat_flags) < 
0) - return FAIL; - - *default_vfd_compatible = (feat_flags & H5FD_FEAT_DEFAULT_VFD_COMPATIBLE); - - return ret_value; -} /* end h5_driver_is_default_vfd_compatible() */ - -int -main(int argc, char *argv[]) -{ -#if defined(H5_PARALLEL_TEST) - MPI_Init(&argc, &argv); -#else - (void)argc; - (void)argv; -#endif - - printf("===================================\n"); - printf("HDF5 TESTS START\n"); - printf("===================================\n"); - - /* Initialize testing framework */ - /* TestInit(argv[0], NULL, NULL); */ - - /* Tests are generally arranged from least to most complexity... */ - /* AddTest("config", test_configure, cleanup_configure, "Configure definitions", NULL); */ - printf("** CONFIGURE DEFINITIONS **\n"); - test_configure(); - printf("\n"); - - /* AddTest("metadata", test_metadata, cleanup_metadata, "Encoding/decoding metadata", NULL); */ - - /* AddTest("checksum", test_checksum, cleanup_checksum, "Checksum algorithm", NULL); */ - printf("** CHECKSUM ALGORITHM **\n"); - test_checksum(); - printf("\n"); - - /* AddTest("tst", test_tst, NULL, "Ternary Search Trees", NULL); */ - - /* AddTest("heap", test_heap, NULL, "Memory Heaps", NULL); */ - - /* AddTest("skiplist", test_skiplist, NULL, "Skip Lists", NULL); */ - - /* AddTest("refstr", test_refstr, NULL, "Reference Counted Strings", NULL); */ - - /* AddTest("file", test_file, cleanup_file, "Low-Level File I/O", NULL); */ - printf("** LOW-LEVEL FILE I/O **\n"); - test_file(); - printf("\n"); - - /* AddTest("objects", test_h5o, cleanup_h5o, "Generic Object Functions", NULL); */ - printf("** GENERIC OBJECT FUNCTIONS **\n"); - test_h5o(); - printf("\n"); - - /* AddTest("h5s", test_h5s, cleanup_h5s, "Dataspaces", NULL); */ - printf("** DATASPACES **\n"); - test_h5s(); - printf("\n"); - - /* AddTest("coords", test_coords, cleanup_coords, "Dataspace coordinates", NULL); */ - printf("** DATASPACE COORDINATES **\n"); - test_coords(); - printf("\n"); - - /* AddTest("sohm", test_sohm, cleanup_sohm, "Shared Object Header Messages", NULL); */ - - /* AddTest("attr", test_attr, cleanup_attr, "Attributes", NULL); */ - printf("** ATTRIBUTES **\n"); - test_attr(); - printf("\n"); - - /* AddTest("select", test_select, cleanup_select, "Selections", NULL); */ - printf("** SELECTIONS **\n"); - test_select(); - printf("\n"); - - /* AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL); */ - printf("** TIME DATATYPES**\n"); - test_time(); - printf("\n"); - - /* AddTest("ref_deprec", test_reference_deprec, cleanup_reference_deprec, "Deprecated References", NULL); - */ - - /* AddTest("ref", test_reference, cleanup_reference, "References", NULL); */ - printf("** REFERENCES **\n"); - test_reference(); - printf("\n"); - - /* AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL); */ - printf("** VARIABLE-LENGTH DATATYPES **\n"); - test_vltypes(); - printf("\n"); - - /* AddTest("vlstrings", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL); */ - printf("** VARIABLE-LENGTH STRINGS **\n"); - test_vlstrings(); - printf("\n"); - - /* AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL); */ - printf("** GROUP & ATTRIBUTE ITERATION **\n"); - test_iterate(); - printf("\n"); - - /* AddTest("array", test_array, cleanup_array, "Array Datatypes", NULL); */ - printf("** ARRAY DATATYPES **\n"); - test_array(); - printf("\n"); - - /* AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL); */ - printf("** GENERIC PROPERTIES **\n"); - test_genprop(); - 
printf("\n"); - - /* AddTest("unicode", test_unicode, cleanup_unicode, "UTF-8 Encoding", NULL); */ - printf("** UTF-8 ENCODING **\n"); - test_unicode(); - printf("\n"); - - /* AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL); */ - printf("** USER-CREATED IDENTIFIERS **\n"); - test_ids(); - printf("\n"); - - /* AddTest("misc", test_misc, cleanup_misc, "Miscellaneous", NULL); */ - printf("** MISCELLANEOUS **\n"); - test_misc(); - printf("\n"); - - /* Display testing information */ - /* TestInfo(argv[0]); */ - - /* Parse command line arguments */ - /* TestParseCmdLine(argc,argv); */ - - /* Perform requested testing */ - /* PerformTests(); */ - - /* Display test summary, if requested */ - /* if (GetTestSummary()) - TestSummary(); */ - - /* Clean up test files, if allowed */ - if (/* GetTestCleanup() && */ !getenv("HDF5_NOCLEANUP")) { - /* TestCleanup(); */ - - printf("TEST CLEANUP\n"); - - H5E_BEGIN_TRY - cleanup_configure(); - cleanup_checksum(); - cleanup_file(); - cleanup_h5o(); - cleanup_h5s(); - cleanup_coords(); - cleanup_attr(); - cleanup_select(); - cleanup_time(); - cleanup_reference(); - cleanup_vltypes(); - cleanup_vlstrings(); - cleanup_iterate(); - cleanup_array(); - cleanup_genprop(); - cleanup_unicode(); - cleanup_misc(); - H5E_END_TRY - - printf("\n"); - } - - /* Release test infrastructure */ - /* TestShutdown(); */ - - /* Exit failure if errors encountered; else exit success. */ - /* No need to print anything since PerformTests() already does. */ - if (nerrors /* GetTestNumErrs() */ > 0) { - printf("** HDF5 tests failed with %d errors **\n", nerrors); - exit(EXIT_FAILURE); - } - else { - printf("** HDF5 tests ran successfully **\n"); - exit(EXIT_SUCCESS); - } -} /* end main() */ diff --git a/test/API/testhdf5.h b/test/API/testhdf5.h deleted file mode 100644 index 4e9e81dad92..00000000000 --- a/test/API/testhdf5.h +++ /dev/null @@ -1,351 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * This header file contains information required for testing the HDF5 library. - */ - -#ifndef TESTHDF5_H -#define TESTHDF5_H - -/* Include generic testing header also */ -/* #include "h5test.h" */ -#include "hdf5.h" -#include "H5private.h" - -#define VERBO_NONE 0 /* None */ -#define VERBO_DEF 3 /* Default */ -#define VERBO_LO 5 /* Low */ -#define VERBO_MED 7 /* Medium */ -#define VERBO_HI 9 /* High */ - -/* Turn off verbose reporting by default */ -#define VERBOSE_MED (false) -#define VERBOSE_HI (false) - -/* Use %ld to print the value because long should cover most cases. 
*/ -/* Used to make certain a return value _is_not_ a value */ -#define CHECK(ret, val, where) \ - do { \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d " \ - "in %s returned %ld \n", \ - where, (int)__LINE__, __FILE__, (long)(ret)); \ - } \ - if ((ret) == (val)) { \ - TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \ - "in %s\n", \ - where, (long)(ret), (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } while (0) - -#define CHECK_I(ret, where) \ - do { \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %ld\n", (where), (int)__LINE__, \ - __FILE__, (long)(ret)); \ - } \ - if ((ret) < 0) { \ - TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld line %4d in %s\n", (where), (long)(ret), \ - (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } while (0) - -/* Check that a pointer is valid (i.e.: not NULL) */ -#define CHECK_PTR(ret, where) \ - { \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ - __FILE__, ((const void *)ret)); \ - } \ - if (!(ret)) { \ - TestErrPrintf("*** UNEXPECTED RETURN from %s is NULL line %4d in %s\n", (where), (int)__LINE__, \ - __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } - -/* Check that a pointer is NULL */ -#define CHECK_PTR_NULL(ret, where) \ - { \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ - __FILE__, ((const void *)ret)); \ - } \ - if (ret) { \ - TestErrPrintf("*** UNEXPECTED RETURN from %s is not NULL line %4d in %s\n", (where), \ - (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } - -/* Check that two pointers are equal */ -#define CHECK_PTR_EQ(ret, val, where) \ - { \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \ - __FILE__, (const void *)(ret)); \ - } \ - if (ret != val) { \ - TestErrPrintf( \ - "*** UNEXPECTED RETURN from %s: returned value of %p is not equal to %p line %4d in %s\n", \ - (where), (const void *)(ret), (const void *)(val), (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } - -/* Used to make certain a return value _is_ a value */ -#define VERIFY(_x, _val, where) \ - do { \ - long __x = (long)_x, __val = (long)_val; \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s had value " \ - "%ld \n", \ - (where), (int)__LINE__, __FILE__, __x); \ - } \ - if ((__x) != (__val)) { \ - TestErrPrintf("*** UNEXPECTED VALUE from %s should be %ld, but is %ld at line %4d " \ - "in %s\n", \ - (where), __val, __x, (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } while (0) - -/* Used to make certain a (non-'long' type's) return value _is_ a value */ -#define VERIFY_TYPE(_x, _val, _type, _format, where) \ - do { \ - _type __x = (_type)_x, __val = (_type)_val; \ - if (VERBOSE_HI) { \ - print_func(" Call to routine: %15s at line %4d in %s had value " _format " \n", (where), \ - (int)__LINE__, __FILE__, __x); \ - } \ - if ((__x) != (__val)) { \ - TestErrPrintf("*** UNEXPECTED VALUE from %s should be " _format ", but is " _format \ - " at line %4d " \ - "in %s\n", \ - (where), __val, __x, (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } while (0) - -/* Used to make certain a string return value _is_ a value */ -#define VERIFY_STR(x, val, where) \ - do { \ - if (VERBOSE_HI) { \ - print_func(" Call 
to routine: %15s at line %4d in %s had value " \ - "%s \n", \ - (where), (int)__LINE__, __FILE__, x); \ - } \ - if (strcmp(x, val) != 0) { \ - TestErrPrintf("*** UNEXPECTED VALUE from %s should be %s, but is %s at line %4d " \ - "in %s\n", \ - where, val, x, (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } while (0) - -/* Used to document process through a test and to check for errors */ -#define RESULT(ret, func) \ - do { \ - if (VERBOSE_MED) { \ - print_func(" Call to routine: %15s at line %4d in %s returned " \ - "%ld\n", \ - func, (int)__LINE__, __FILE__, (long)(ret)); \ - } \ - if (VERBOSE_HI) \ - H5Eprint2(H5E_DEFAULT, stdout); \ - if ((ret) == FAIL) { \ - TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \ - "in %s\n", \ - func, (long)(ret), (int)__LINE__, __FILE__); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - } \ - } while (0) - -/* Used to document process through a test */ -#if defined(H5_HAVE_PARALLEL) && defined(H5_PARALLEL_TEST) -#define MESSAGE(V, A) \ - { \ - int mpi_rank; \ - \ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); \ - if (mpi_rank == 0 && VERBO_LO /* HDGetTestVerbosity() */ >= (V)) \ - print_func A; \ - } -#else /* H5_HAVE_PARALLEL */ -#define MESSAGE(V, A) \ - { \ - if (VERBO_LO /* HDGetTestVerbosity() */ >= (V)) \ - print_func A; \ - } -#endif /* H5_HAVE_PARALLEL */ - -/* Used to indicate an error that is complex to check for */ -#define ERROR(where) \ - do { \ - if (VERBOSE_HI) \ - print_func(" Call to routine: %15s at line %4d in %s returned " \ - "invalid result\n", \ - where, (int)__LINE__, __FILE__); \ - TestErrPrintf("*** UNEXPECTED RESULT from %s at line %4d in %s\n", where, (int)__LINE__, __FILE__); \ - } while (0) - -/* definitions for command strings */ -#define VERBOSITY_STR "Verbosity" -#define SKIP_STR "Skip" -#define TEST_STR "Test" -#define CLEAN_STR "Cleanup" - -#define AT() printf(" at %s:%d in %s()...\n", __FILE__, __LINE__, __func__); -#define TESTING(WHAT) \ - { \ - printf("Testing %-62s", WHAT); \ - fflush(stdout); \ - } -#define TESTING_2(WHAT) \ - { \ - printf(" Testing %-60s", WHAT); \ - fflush(stdout); \ - } -#define PASSED() \ - { \ - puts(" PASSED"); \ - fflush(stdout); \ - } -#define H5_FAILED() \ - { \ - puts("*FAILED*"); \ - fflush(stdout); \ - } -#define H5_WARNING() \ - { \ - puts("*WARNING*"); \ - fflush(stdout); \ - } -#define SKIPPED() \ - { \ - puts(" -SKIP-"); \ - fflush(stdout); \ - } -#define PUTS_ERROR(s) \ - { \ - puts(s); \ - AT(); \ - goto error; \ - } -#define TEST_ERROR \ - { \ - H5_FAILED(); \ - AT(); \ - goto error; \ - } -#define STACK_ERROR \ - { \ - H5Eprint2(H5E_DEFAULT, stdout); \ - goto error; \ - } -#define FAIL_STACK_ERROR \ - { \ - H5_FAILED(); \ - AT(); \ - H5Eprint2(H5E_DEFAULT, stdout); \ - goto error; \ - } -#define FAIL_PUTS_ERROR(s) \ - { \ - H5_FAILED(); \ - AT(); \ - puts(s); \ - goto error; \ - } - -#ifdef __cplusplus -extern "C" { -#endif - -extern int nerrors; - -int print_func(const char *format, ...); -int TestErrPrintf(const char *format, ...); -hid_t h5_fileaccess(void); -/* Functions that will replace components of a FAPL */ -herr_t h5_get_vfd_fapl(hid_t fapl_id); -herr_t h5_get_libver_fapl(hid_t fapl_id); -char *h5_fixname(const char *base_name, hid_t fapl, char *fullname, size_t size); -char *h5_fixname_superblock(const char *base_name, hid_t fapl, char *fullname, size_t size); -bool h5_using_default_driver(const char *drv_name); -herr_t h5_driver_is_default_vfd_compatible(hid_t fapl_id, bool *default_vfd_compatible); - -#ifdef H5_HAVE_PARALLEL -char 
*getenv_all(MPI_Comm comm, int root, const char *name); -#endif - -/* Prototypes for the test routines */ -void test_metadata(void); -void test_checksum(void); -void test_refstr(void); -void test_file(void); -void test_h5o(void); -void test_h5t(void); -void test_h5s(void); -void test_coords(void); -void test_h5d(void); -void test_attr(void); -void test_select(void); -void test_time(void); -void test_reference(void); -void test_reference_deprec(void); -void test_vltypes(void); -void test_vlstrings(void); -void test_iterate(void); -void test_array(void); -void test_genprop(void); -void test_configure(void); -void test_h5_system(void); -void test_misc(void); -void test_ids(void); -void test_skiplist(void); -void test_sohm(void); -void test_unicode(void); - -/* Prototypes for the cleanup routines */ -void cleanup_metadata(void); -void cleanup_checksum(void); -void cleanup_file(void); -void cleanup_h5o(void); -void cleanup_h5s(void); -void cleanup_coords(void); -void cleanup_attr(void); -void cleanup_select(void); -void cleanup_time(void); -void cleanup_reference(void); -void cleanup_reference_deprec(void); -void cleanup_vltypes(void); -void cleanup_vlstrings(void); -void cleanup_iterate(void); -void cleanup_array(void); -void cleanup_genprop(void); -void cleanup_configure(void); -void cleanup_h5_system(void); -void cleanup_sohm(void); -void cleanup_misc(void); -void cleanup_unicode(void); - -/* Extern global variables */ -extern uint64_t vol_cap_flags_g; - -#ifdef __cplusplus -} -#endif -#endif /* TESTHDF5_H */ diff --git a/test/API/tfile.c b/test/API/tfile.c deleted file mode 100644 index 6b316d47259..00000000000 --- a/test/API/tfile.c +++ /dev/null @@ -1,8369 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tfile - * - * Test the low-level file I/O features. - * - *************************************************************/ - -#include "testhdf5.h" -/* #include "H5srcdir.h" */ - -/* #include "H5Iprivate.h" */ -/* #include "H5Pprivate.h" */ -/* #include "H5VLprivate.h" */ /* Virtual Object Layer */ - -#if 0 -/* - * This file needs to access private information from the H5F package. - * This file also needs to access the file testing code. 
- */ -#define H5F_FRIEND /*suppress error about including H5Fpkg */ -#define H5F_TESTING -#include "H5Fpkg.h" /* File access */ - -#define H5FD_FRIEND /*suppress error about including H5FDpkg.h */ -#define H5FD_TESTING -#include "H5FDpkg.h" - -#define H5D_FRIEND /*suppress error about including H5Dpkg */ -#include "H5Dpkg.h" /* Dataset access */ - -#define H5S_FRIEND /*suppress error about including H5Spkg */ -#include "H5Spkg.h" /* Dataspace */ - -#define H5T_FRIEND /*suppress error about including H5Tpkg */ -#include "H5Tpkg.h" /* Datatype */ - -#define H5A_FRIEND /*suppress error about including H5Apkg */ -#include "H5Apkg.h" /* Attributes */ - -#define H5O_FRIEND /*suppress error about including H5Opkg */ -#include "H5Opkg.h" /* Object headers */ -#endif - -#define BAD_USERBLOCK_SIZE1 (hsize_t)1 -#define BAD_USERBLOCK_SIZE2 (hsize_t)2 -#define BAD_USERBLOCK_SIZE3 (hsize_t)3 -#define BAD_USERBLOCK_SIZE4 (hsize_t)64 -#define BAD_USERBLOCK_SIZE5 (hsize_t)511 -#define BAD_USERBLOCK_SIZE6 (hsize_t)513 -#define BAD_USERBLOCK_SIZE7 (hsize_t)6144 - -#define F1_USERBLOCK_SIZE (hsize_t)0 -#define F1_OFFSET_SIZE sizeof(haddr_t) -#define F1_LENGTH_SIZE sizeof(hsize_t) -#define F1_SYM_LEAF_K 4 -#define F1_SYM_INTERN_K 16 -#define FILE1 "tfile1.h5" -#define SFILE1 "sys_file1" - -#define REOPEN_FILE "tfile_reopen.h5" -#define REOPEN_DSET "dset" - -#define F2_USERBLOCK_SIZE (hsize_t)512 -#define F2_OFFSET_SIZE 8 -#define F2_LENGTH_SIZE 8 -#define F2_SYM_LEAF_K 8 -#define F2_SYM_INTERN_K 32 -#define F2_RANK 2 -#define F2_DIM0 4 -#define F2_DIM1 6 -#define F2_DSET "dset" -#define FILE2 "tfile2.h5" - -#define F3_USERBLOCK_SIZE (hsize_t)0 -#define F3_OFFSET_SIZE F2_OFFSET_SIZE -#define F3_LENGTH_SIZE F2_LENGTH_SIZE -#define F3_SYM_LEAF_K F2_SYM_LEAF_K -#define F3_SYM_INTERN_K F2_SYM_INTERN_K -#define FILE3 "tfile3.h5" - -#define GRP_NAME "/group" -#define DSET_NAME "dataset" -#define ATTR_NAME "attr" -#define TYPE_NAME "type" -#define FILE4 "tfile4.h5" - -#define OBJ_ID_COUNT_0 0 -#define OBJ_ID_COUNT_1 1 -#define OBJ_ID_COUNT_2 2 -#define OBJ_ID_COUNT_3 3 -#define OBJ_ID_COUNT_4 4 -#define OBJ_ID_COUNT_6 6 -#define OBJ_ID_COUNT_8 8 - -#define GROUP1 "Group1" -#define DSET1 "Dataset1" -#define DSET2 "/Group1/Dataset2" - -#define TESTA_GROUPNAME "group" -#define TESTA_DSETNAME "dataset" -#define TESTA_ATTRNAME "attribute" -#define TESTA_DTYPENAME "compound" -#define TESTA_NAME_BUF_SIZE 64 -#define TESTA_RANK 2 -#define TESTA_NX 4 -#define TESTA_NY 5 - -#define USERBLOCK_SIZE ((hsize_t)512) - -/* Declarations for test_filespace_*() */ -#define FILENAME_LEN 1024 /* length of file name */ -#define DSETNAME "dset" /* Name of dataset */ -#define NELMTS(X) (sizeof(X) / sizeof(X[0])) /* # of elements */ -#define READ_OLD_BUFSIZE 1024 /* Buffer for holding file data */ -#define FILE5 "tfile5.h5" /* Test file */ -#define TEST_THRESHOLD10 10 /* Free space section threshold */ -#define FSP_SIZE_DEF 4096 /* File space page size default */ -#define FSP_SIZE512 512 /* File space page size */ -#define FSP_SIZE1G (1024 * 1024 * 1024) /* File space page size */ - -/* Declaration for test_libver_macros2() */ -#define FILE6 "tfile6.h5" /* Test file */ - -/* Declaration for test_get_obj_ids() */ -#define FILE7 "tfile7.h5" /* Test file */ -#define NGROUPS 2 -#define NDSETS 4 - -/* Declaration for test_incr_filesize() */ -#define FILE8 "tfile8.h5" /* Test file */ - -/* Files created under 1.6 branch and 1.8 branch--used in test_filespace_compatible() */ -const char *OLD_FILENAME[] = { - "filespace_1_6.h5", /* 1.6 HDF5 file */ - 
"filespace_1_8.h5" /* 1.8 HDF5 file */ -}; - -/* Files created in 1.10.0 release --used in test_filespace_1.10.0_compatible() */ -/* These files are copied from release 1.10.0 tools/h5format_convert/testfiles */ -const char *OLD_1_10_0_FILENAME[] = { - "h5fc_ext1_i.h5", /* 0 */ - "h5fc_ext1_f.h5", /* 1 */ - "h5fc_ext2_if.h5", /* 2 */ - "h5fc_ext2_sf.h5", /* 3 */ - "h5fc_ext3_isf.h5", /* 4 */ - "h5fc_ext_none.h5" /* 5 */ -}; - -/* Files used in test_filespace_round_compatible() */ -const char *FSPACE_FILENAMES[] = { - "fsm_aggr_nopersist.h5", /* H5F_FILE_SPACE_AGGR, not persisting free-space */ - "fsm_aggr_persist.h5", /* H5F_FILE_SPACE_AGGR, persisting free-space */ - "paged_nopersist.h5", /* H5F_FILE_SPACE_PAGE, not persisting free-space */ - "paged_persist.h5", /* H5F_FILE_SPACE_PAGE, persisting free-space */ - "aggr.h5", /* H5F_FILE_SPACE_AGGR */ - "none.h5" /* H5F_FILE_SPACE_NONE */ -}; - -const char *FILESPACE_NAME[] = {"tfilespace.h5", NULL}; - -/* Declarations for test_libver_bounds_copy(): */ -/* SRC_FILE: source file created under 1.8 branch with latest format */ -/* DST_FILE: destination file for copying the dataset in SRC_FILE */ -/* DSET_DS1: the dataset created in SRC_FILE to be copied to DST_FILE */ -#define SRC_FILE "fill18.h5" -#define DST_FILE "fill18_copy.h5" -#define DSET_DS1 "DS1" - -#if 0 -/* Local test function declarations for version bounds */ -static void test_libver_bounds_low_high(const char *env_h5_drvr); -static void test_libver_bounds_super(hid_t fapl, const char *env_h5_drvr); -static void test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm); -static void test_libver_bounds_super_open(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm); -static void test_libver_bounds_obj(hid_t fapl); -static void test_libver_bounds_dataset(hid_t fapl); -static void test_libver_bounds_dataspace(hid_t fapl); -static void test_libver_bounds_datatype(hid_t fapl); -static void test_libver_bounds_datatype_check(hid_t fapl, hid_t tid); -static void test_libver_bounds_attributes(hid_t fapl); -#endif - -#define DSET_NULL "DSET_NULL" -#define DSET "DSET" -#define DSETA "DSETA" -#define DSETB "DSETB" -#define DSETC "DSETC" - -#if 0 -static void -create_objects(hid_t, hid_t, hid_t *, hid_t *, hid_t *, hid_t *); -static void -test_obj_count_and_id(hid_t, hid_t, hid_t, hid_t, hid_t, hid_t); -static void -check_file_id(hid_t, hid_t); -#endif - -#if 0 -/* Helper routine used by test_rw_noupdate() */ -static int cal_chksum(const char *file, uint32_t *chksum); - -static void test_rw_noupdate(void); -#endif - -/**************************************************************** -** -** test_file_create(): Low-level file creation I/O test routine. 
-** -****************************************************************/ -static void -test_file_create(void) -{ - hid_t fid1 = H5I_INVALID_HID; - hid_t fid2 = H5I_INVALID_HID; - hid_t fid3 = H5I_INVALID_HID; /* HDF5 File IDs */ - hid_t tmpl1, tmpl2; /* file creation templates */ - hsize_t ublock; /* sizeof userblock */ - size_t parm; /* file-creation parameters */ - size_t parm2; /* file-creation parameters */ - unsigned iparm; - unsigned iparm2; - herr_t ret; /*generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Low-Level File Creation I/O\n")); - - /* First ensure the file does not exist */ - H5E_BEGIN_TRY - { - H5Fdelete(FILE1, H5P_DEFAULT); - } - H5E_END_TRY - - /* Try opening a non-existent file */ - H5E_BEGIN_TRY - { - fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(fid1, FAIL, "H5Fopen"); - - /* Test create with various sequences of H5F_ACC_EXCL and */ - /* H5F_ACC_TRUNC flags */ - - /* Create with H5F_ACC_EXCL */ - fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) { - /* - * try to create the same file with H5F_ACC_TRUNC. This should fail - * because fid1 is the same file and is currently open. - */ - H5E_BEGIN_TRY - { - fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fcreate"); - } - - /* Close all files */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - H5E_BEGIN_TRY - { - ret = H5Fclose(fid2); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fclose"); /*file should not have been open */ - - /* - * Try again with H5F_ACC_EXCL. This should fail because the file already - * exists from the previous steps. - */ - H5E_BEGIN_TRY - { - fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(fid1, FAIL, "H5Fcreate"); - - /* Test create with H5F_ACC_TRUNC. This will truncate the existing file. */ - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) { - /* - * Try to truncate first file again. This should fail because fid1 is the - * same file and is currently open. - */ - H5E_BEGIN_TRY - { - fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fcreate"); - } - - /* - * Try with H5F_ACC_EXCL. This should fail too because the file already - * exists. 
- */ - H5E_BEGIN_TRY - { - fid2 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fcreate"); - - /* Get the file-creation template */ - tmpl1 = H5Fget_create_plist(fid1); - CHECK(tmpl1, FAIL, "H5Fget_create_plist"); - - /* Get the file-creation parameters */ - ret = H5Pget_userblock(tmpl1, &ublock); - CHECK(ret, FAIL, "H5Pget_userblock"); - VERIFY(ublock, F1_USERBLOCK_SIZE, "H5Pget_userblock"); - - ret = H5Pget_sizes(tmpl1, &parm, &parm2); - CHECK(ret, FAIL, "H5Pget_sizes"); - VERIFY(parm, F1_OFFSET_SIZE, "H5Pget_sizes"); - VERIFY(parm2, F1_LENGTH_SIZE, "H5Pget_sizes"); - - ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); - CHECK(ret, FAIL, "H5Pget_sym_k"); - VERIFY(iparm, F1_SYM_INTERN_K, "H5Pget_sym_k"); - VERIFY(iparm2, F1_SYM_LEAF_K, "H5Pget_sym_k"); - - /* Release file-creation template */ - ret = H5Pclose(tmpl1); - CHECK(ret, FAIL, "H5Pclose"); - -#ifdef LATER - /* Double-check that the atom has been vaporized */ - ret = H5Pclose(tmpl1); - VERIFY(ret, FAIL, "H5Pclose"); -#endif - - if (h5_using_default_driver(NULL)) { - - /* Create a new file with a non-standard file-creation template */ - tmpl1 = H5Pcreate(H5P_FILE_CREATE); - CHECK(tmpl1, FAIL, "H5Pcreate"); - - /* Try setting some bad userblock sizes */ - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE1); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE2); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE3); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE4); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE5); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE6); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - H5E_BEGIN_TRY - { - ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE7); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_userblock"); - - /* Set the new file-creation parameters */ - ret = H5Pset_userblock(tmpl1, F2_USERBLOCK_SIZE); - CHECK(ret, FAIL, "H5Pset_userblock"); - - ret = H5Pset_sizes(tmpl1, (size_t)F2_OFFSET_SIZE, (size_t)F2_LENGTH_SIZE); - CHECK(ret, FAIL, "H5Pset_sizes"); - - ret = H5Pset_sym_k(tmpl1, F2_SYM_INTERN_K, F2_SYM_LEAF_K); - CHECK(ret, FAIL, "H5Pset_sym_k"); - - /* - * Try to create second file, with non-standard file-creation template - * params. - */ - fid2 = H5Fcreate(FILE2, H5F_ACC_TRUNC, tmpl1, H5P_DEFAULT); - CHECK(fid2, FAIL, "H5Fcreate"); - - /* Release file-creation template */ - ret = H5Pclose(tmpl1); - CHECK(ret, FAIL, "H5Pclose"); - - /* Make certain we can create a dataset properly in the file with the userblock */ - { - hid_t dataset_id, dataspace_id; /* identifiers */ - hsize_t dims[F2_RANK]; - unsigned data[F2_DIM0][F2_DIM1]; - unsigned i, j; - - /* Create the data space for the dataset. */ - dims[0] = F2_DIM0; - dims[1] = F2_DIM1; - dataspace_id = H5Screate_simple(F2_RANK, dims, NULL); - CHECK(dataspace_id, FAIL, "H5Screate_simple"); - - /* Create the dataset. 
*/ - dataset_id = H5Dcreate2(fid2, F2_DSET, H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - for (i = 0; i < F2_DIM0; i++) - for (j = 0; j < F2_DIM1; j++) - data[i][j] = i * 10 + j; - - /* Write data to the new dataset */ - ret = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* End access to the dataset and release resources used by it. */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Terminate access to the data space. */ - ret = H5Sclose(dataspace_id); - CHECK(ret, FAIL, "H5Sclose"); - } - - /* Get the file-creation template */ - tmpl1 = H5Fget_create_plist(fid2); - CHECK(tmpl1, FAIL, "H5Fget_create_plist"); - - /* Get the file-creation parameters */ - ret = H5Pget_userblock(tmpl1, &ublock); - CHECK(ret, FAIL, "H5Pget_userblock"); - VERIFY(ublock, F2_USERBLOCK_SIZE, "H5Pget_userblock"); - - ret = H5Pget_sizes(tmpl1, &parm, &parm2); - CHECK(ret, FAIL, "H5Pget_sizes"); - VERIFY(parm, F2_OFFSET_SIZE, "H5Pget_sizes"); - VERIFY(parm2, F2_LENGTH_SIZE, "H5Pget_sizes"); - - ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); - CHECK(ret, FAIL, "H5Pget_sym_k"); - VERIFY(iparm, F2_SYM_INTERN_K, "H5Pget_sym_k"); - VERIFY(iparm2, F2_SYM_LEAF_K, "H5Pget_sym_k"); - - /* Clone the file-creation template */ - tmpl2 = H5Pcopy(tmpl1); - CHECK(tmpl2, FAIL, "H5Pcopy"); - - /* Release file-creation template */ - ret = H5Pclose(tmpl1); - CHECK(ret, FAIL, "H5Pclose"); - - /* Set the new file-creation parameter */ - ret = H5Pset_userblock(tmpl2, F3_USERBLOCK_SIZE); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* - * Try to create second file, with non-standard file-creation template - * params - */ - fid3 = H5Fcreate(FILE3, H5F_ACC_TRUNC, tmpl2, H5P_DEFAULT); - CHECK(fid3, FAIL, "H5Fcreate"); - - /* Release file-creation template */ - ret = H5Pclose(tmpl2); - CHECK(ret, FAIL, "H5Pclose"); - - /* Get the file-creation template */ - tmpl1 = H5Fget_create_plist(fid3); - CHECK(tmpl1, FAIL, "H5Fget_create_plist"); - - /* Get the file-creation parameters */ - ret = H5Pget_userblock(tmpl1, &ublock); - CHECK(ret, FAIL, "H5Pget_userblock"); - VERIFY(ublock, F3_USERBLOCK_SIZE, "H5Pget_userblock"); - - ret = H5Pget_sizes(tmpl1, &parm, &parm2); - CHECK(ret, FAIL, "H5Pget_sizes"); - VERIFY(parm, F3_OFFSET_SIZE, "H5Pget_sizes"); - VERIFY(parm2, F3_LENGTH_SIZE, "H5Pget_sizes"); - - ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); - CHECK(ret, FAIL, "H5Pget_sym_k"); - VERIFY(iparm, F3_SYM_INTERN_K, "H5Pget_sym_k"); - VERIFY(iparm2, F3_SYM_LEAF_K, "H5Pget_sym_k"); - - /* Release file-creation template */ - ret = H5Pclose(tmpl1); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close second file */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close third file */ - ret = H5Fclose(fid3); - CHECK(ret, FAIL, "H5Fclose"); - } - - /* Close first file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_file_create() */ - -/**************************************************************** -** -** test_file_open(): Low-level file open I/O test routine. 
-** -****************************************************************/ -static void -test_file_open(const char *env_h5_drvr) -{ - hid_t fid1; /*HDF5 File IDs */ -#if 0 - hid_t fid2; - hid_t did; /*dataset ID */ - hid_t fapl_id; /*file access property list ID */ -#endif - hid_t tmpl1; /*file creation templates */ - hsize_t ublock; /*sizeof user block */ - size_t parm; /*file-creation parameters */ - size_t parm2; /*file-creation parameters */ - unsigned iparm; - unsigned iparm2; - unsigned intent; - herr_t ret; /*generic return value */ - - /* - * Test single file open - */ - - /* Only run this test with sec2/default driver */ - if (!h5_using_default_driver(env_h5_drvr)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Low-Level File Opening I/O\n")); - - /* Open first file */ - fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Get the intent */ - ret = H5Fget_intent(fid1, &intent); - CHECK(ret, FAIL, "H5Fget_intent"); - VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent"); - - /* Get the file-creation template */ - tmpl1 = H5Fget_create_plist(fid1); - CHECK(tmpl1, FAIL, "H5Fget_create_plist"); - - /* Get the file-creation parameters */ - ret = H5Pget_userblock(tmpl1, &ublock); - CHECK(ret, FAIL, "H5Pget_userblock"); - VERIFY(ublock, F2_USERBLOCK_SIZE, "H5Pget_userblock"); - - ret = H5Pget_sizes(tmpl1, &parm, &parm2); - CHECK(ret, FAIL, "H5Pget_sizes"); - VERIFY(parm, F2_OFFSET_SIZE, "H5Pget_sizes"); - VERIFY(parm2, F2_LENGTH_SIZE, "H5Pget_sizes"); - - ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2); - CHECK(ret, FAIL, "H5Pget_sym_k"); - VERIFY(iparm, F2_SYM_INTERN_K, "H5Pget_sym_k"); - VERIFY(iparm2, F2_SYM_LEAF_K, "H5Pget_sym_k"); - - /* Release file-creation template */ - ret = H5Pclose(tmpl1); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close first file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* - * Test two file opens: one is opened H5F_ACC_RDONLY and H5F_CLOSE_WEAK. - * It's closed with an object left open. Then another is opened - * H5F_ACC_RDWR, which should fail. - */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 2 File Openings - SKIPPED for now due to no file close degree support\n")); -#if 0 - /* Create file access property list */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl_id, FAIL, "H5Pcreate"); - - /* Set file close mode to H5F_CLOSE_WEAK */ - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* Open file for first time */ - fid1 = H5Fopen(FILE2, H5F_ACC_RDONLY, fapl_id); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Check the intent */ - ret = H5Fget_intent(fid1, &intent); - CHECK(ret, FAIL, "H5Fget_intent"); - VERIFY(intent, H5F_ACC_RDONLY, "H5Fget_intent"); - - /* Open dataset */ - did = H5Dopen2(fid1, F2_DSET, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Check that the intent works even if NULL is passed in */ - ret = H5Fget_intent(fid1, NULL); - CHECK(ret, FAIL, "H5Fget_intent"); - - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open file for second time, which should fail. 
*/ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); - - /* Check that the intent fails for an invalid ID */ - H5E_BEGIN_TRY - { - ret = H5Fget_intent(fid1, &intent); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fget_intent"); - - /* Close dataset from first open */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); -#endif -} /* test_file_open() */ - -/**************************************************************** -** -** test_file_reopen(): File reopen test routine. -** -****************************************************************/ -static void -test_file_reopen(void) -{ - hid_t fid = -1; /* file ID from initial open */ - hid_t rfid = -1; /* file ID from reopen */ - hid_t did = -1; /* dataset ID (both opens) */ - hid_t sid = -1; /* dataspace ID for dataset creation */ - hsize_t dims = 6; /* dataspace size */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing File Re-opening\n")); - - /* Create file via first ID */ - fid = H5Fcreate(REOPEN_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK_I(fid, "H5Fcreate"); - - /* Create a dataset in the file */ - sid = H5Screate_simple(1, &dims, &dims); - CHECK_I(sid, "H5Screate_simple"); - did = H5Dcreate2(fid, REOPEN_DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK_I(did, "H5Dcreate2"); - - /* Close dataset and dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Reopen the file with a different file ID */ - rfid = H5Freopen(fid); - CHECK_I(rfid, "H5Freopen"); - - /* Reopen the dataset through the reopen file ID */ - did = H5Dopen2(rfid, REOPEN_DSET, H5P_DEFAULT); - CHECK_I(did, "H5Dopen2"); - - /* Close and clean up */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(rfid); - CHECK(ret, FAIL, "H5Fclose"); - H5Fdelete(REOPEN_FILE, H5P_DEFAULT); - -} /* test_file_reopen() */ - -/**************************************************************** -** -** test_file_close(): low-level file close test routine. -** It mainly tests behavior with close degree. 
-** -*****************************************************************/ -static void -test_file_close(void) -{ -#if 0 - hid_t fid1, fid2; - hid_t fapl_id, access_id; - hid_t dataset_id, group_id1, group_id2, group_id3; - H5F_close_degree_t fc_degree; - herr_t ret; -#endif - - /* Output message about test being performed */ - MESSAGE(5, ("Testing File Closing with file close degrees - SKIPPED for now due to no file close degree " - "support\n")); -#if 0 - /* Test behavior while opening file multiple times with different - * file close degree value - */ - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl_id, FAIL, "H5Pcreate"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - ret = H5Pget_fclose_degree(fapl_id, &fc_degree); - VERIFY(fc_degree, H5F_CLOSE_STRONG, "H5Pget_fclose_degree"); - - /* should fail */ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should succeed */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close second open */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Test behavior while opening file multiple times with different file - * close degree - */ - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - ret = H5Pget_fclose_degree(fapl_id, &fc_degree); - VERIFY(fc_degree, H5F_CLOSE_WEAK, "H5Pget_fclose_degree"); - - /* should succeed */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close second open */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Test behavior while opening file multiple times with file close - * degree STRONG */ - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid1, FAIL, "H5Fcreate"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should fail */ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should succeed */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Create a dataset and a group in each file open respectively */ - create_objects(fid1, fid2, NULL, NULL, NULL, NULL); - - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close second open */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Test behavior while opening file multiple times with file close - * degree SEMI */ - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid1, FAIL, "H5Fcreate"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); - 
CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should fail */ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should succeed */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Create a dataset and a group in each file open respectively */ - create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3); - - /* Close first open, should fail since it is SEMI and objects are - * still open. */ - H5E_BEGIN_TRY - { - ret = H5Fclose(fid1); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fclose"); - - /* Close second open, should fail since it is SEMI and objects are - * still open. */ - H5E_BEGIN_TRY - { - ret = H5Fclose(fid2); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fclose"); - - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Gclose(group_id1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(group_id2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close second open, should fail since it is SEMI and one group ID is - * still open. */ - H5E_BEGIN_TRY - { - ret = H5Fclose(fid2); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fclose"); - - /* Same check with H5Idec_ref() (should fail also) */ - H5E_BEGIN_TRY - { - ret = H5Idec_ref(fid2); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Idec_ref"); - - ret = H5Gclose(group_id3); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close second open again. Should succeed. */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Test behavior while opening file multiple times with file close - * degree WEAK */ - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid1, FAIL, "H5Fcreate"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should fail */ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should succeed */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Create a dataset and a group in each file open respectively */ - create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3); - - /* Create more new files and test object count and ID list functions */ - test_obj_count_and_id(fid1, fid2, dataset_id, group_id1, group_id2, group_id3); - - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close second open. File will be finally closed after all objects - * are closed. 
*/ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Gclose(group_id1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(group_id2); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(group_id3); - CHECK(ret, FAIL, "H5Gclose"); - - /* Test behavior while opening file multiple times with file close - * degree DEFAULT */ - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid1, FAIL, "H5Fcreate"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should fail */ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); - - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); - - /* should succeed */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Create a dataset and a group in each file open respectively */ - create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3); - - access_id = H5Fget_access_plist(fid1); - CHECK(access_id, FAIL, "H5Fget_access_plist"); - - ret = H5Pget_fclose_degree(access_id, &fc_degree); - CHECK(ret, FAIL, "H5Pget_fclose_degree"); - - switch (fc_degree) { - case H5F_CLOSE_STRONG: - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - /* Close second open */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - break; - case H5F_CLOSE_SEMI: - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Gclose(group_id1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(group_id2); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(group_id3); - CHECK(ret, FAIL, "H5Gclose"); - /* Close second open */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - break; - case H5F_CLOSE_WEAK: - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - /* Close second open */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Gclose(group_id1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(group_id2); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(group_id3); - CHECK(ret, FAIL, "H5Gclose"); - break; - case H5F_CLOSE_DEFAULT: - default: - CHECK(fc_degree, H5F_CLOSE_DEFAULT, "H5Pget_fclose_degree"); - break; - } - - /* Close file access property list */ - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(access_id); - CHECK(ret, FAIL, "H5Pclose"); -#endif -} - -/**************************************************************** -** -** create_objects(): routine called by test_file_close to create -** a dataset and a group in file. -** -****************************************************************/ -#if 0 -static void -create_objects(hid_t fid1, hid_t fid2, hid_t *ret_did, hid_t *ret_gid1, hid_t *ret_gid2, hid_t *ret_gid3) -{ - ssize_t oid_count; - herr_t ret; - - /* Check reference counts of file IDs and opened object IDs. - * The verification is hard-coded. If in any case, this testing - * is changed, remember to check this part and update the macros. 
- */ - { - oid_count = H5Fget_obj_count(fid1, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_2, "H5Fget_obj_count"); - - oid_count = H5Fget_obj_count(fid1, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); - - oid_count = H5Fget_obj_count(fid2, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_2, "H5Fget_obj_count"); - - oid_count = H5Fget_obj_count(fid2, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); - } - - /* create a dataset in the first file open */ - { - hid_t dataset_id, dataspace_id; /* identifiers */ - hsize_t dims[F2_RANK]; - unsigned data[F2_DIM0][F2_DIM1]; - unsigned i, j; - - /* Create the data space for the dataset. */ - dims[0] = F2_DIM0; - dims[1] = F2_DIM1; - dataspace_id = H5Screate_simple(F2_RANK, dims, NULL); - CHECK(dataspace_id, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dataset_id = - H5Dcreate2(fid1, "/dset", H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - for (i = 0; i < F2_DIM0; i++) - for (j = 0; j < F2_DIM1; j++) - data[i][j] = i * 10 + j; - - /* Write data to the new dataset */ - ret = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - if (ret_did != NULL) - *ret_did = dataset_id; - - /* Terminate access to the data space. */ - ret = H5Sclose(dataspace_id); - CHECK(ret, FAIL, "H5Sclose"); - } - - /* Create a group in the second file open */ - { - hid_t gid1, gid2, gid3; - gid1 = H5Gcreate2(fid2, "/group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - if (ret_gid1 != NULL) - *ret_gid1 = gid1; - - gid2 = H5Gopen2(fid2, "/group", H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - if (ret_gid2 != NULL) - *ret_gid2 = gid2; - - gid3 = H5Gopen2(fid2, "/group", H5P_DEFAULT); - CHECK(gid3, FAIL, "H5Gopen2"); - if (ret_gid3 != NULL) - *ret_gid3 = gid3; - } - - /* Check reference counts of file IDs and opened object IDs. - * The verification is hard-coded. If in any case, this testing - * is changed, remember to check this part and update the macros. - */ - { - oid_count = H5Fget_obj_count(fid1, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_6, "H5Fget_obj_count"); - - oid_count = H5Fget_obj_count(fid1, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); - - oid_count = H5Fget_obj_count(fid2, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_6, "H5Fget_obj_count"); - - oid_count = H5Fget_obj_count(fid2, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); - } -} -#endif - -/**************************************************************** -** -** test_get_obj_ids(): Test the bug and the fix for Jira 8528. -** H5Fget_obj_ids overfilled the list of -** object IDs by one. This is an enhancement -** for test_obj_count_and_id(). 
-** -****************************************************************/ -static void -test_get_obj_ids(void) -{ - hid_t fid, gid[NGROUPS], dset[NDSETS]; - hid_t filespace; - hsize_t file_dims[F2_RANK] = {F2_DIM0, F2_DIM1}; - ssize_t oid_count, ret_count; - hid_t *oid_list = NULL; - herr_t ret; - int i, m, n; - ssize_t oid_list_size = NDSETS; - char gname[64], dname[64]; - - MESSAGE(5, ("Testing retrieval of object IDs\n")); - - /* Create a new file */ - fid = H5Fcreate(FILE7, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - filespace = H5Screate_simple(F2_RANK, file_dims, NULL); - CHECK(filespace, FAIL, "H5Screate_simple"); - - /* creates NGROUPS groups under the root group */ - for (m = 0; m < NGROUPS; m++) { - snprintf(gname, sizeof(gname), "group%d", m); - gid[m] = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid[m], FAIL, "H5Gcreate2"); - } - - /* create NDSETS datasets under the root group */ - for (n = 0; n < NDSETS; n++) { - snprintf(dname, sizeof(dname), "dataset%d", n); - dset[n] = H5Dcreate2(fid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset[n], FAIL, "H5Dcreate2"); - } - - /* The number of opened objects should be NGROUPS + NDSETS + 1. One is opened file. */ - oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, (NGROUPS + NDSETS + 1), "H5Fget_obj_count"); - - oid_list = (hid_t *)calloc((size_t)oid_list_size, sizeof(hid_t)); - CHECK_PTR(oid_list, "calloc"); - - /* Call the public function H5F_get_obj_ids to use H5F__get_objects. User reported having problem here. - * that the returned size (ret_count) from H5Fget_obj_ids is one greater than the size passed in - * (oid_list_size) */ - ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list); - CHECK(ret_count, FAIL, "H5Fget_obj_ids"); - VERIFY(ret_count, oid_list_size, "H5Fget_obj_count"); - - /* Close all object IDs on the list except the file ID. The first ID is supposed to be file ID according - * to the library design */ - for (i = 0; i < ret_count; i++) { - if (fid != oid_list[i]) { - ret = H5Oclose(oid_list[i]); - CHECK(ret, FAIL, "H5Oclose"); - } - } - - /* The number of opened objects should be NGROUPS + 1 + 1. The first one is opened file. The second one - * is the dataset ID left open from the previous around of H5Fget_obj_ids */ - oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, NGROUPS + 2, "H5Fget_obj_count"); - - /* Get the IDs of the left opened objects */ - ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list); - CHECK(ret_count, FAIL, "H5Fget_obj_ids"); - VERIFY(ret_count, oid_list_size, "H5Fget_obj_count"); - - /* Close all object IDs on the list except the file ID. 
The first ID is still the file ID */ - for (i = 0; i < ret_count; i++) { - if (fid != oid_list[i]) { - ret = H5Oclose(oid_list[i]); - CHECK(ret, FAIL, "H5Oclose"); - } - } - - H5Sclose(filespace); - H5Fclose(fid); - - free(oid_list); - - /* Reopen the file to check whether H5Fget_obj_count and H5Fget_obj_ids still works - * when the file is closed first */ - fid = H5Fopen(FILE7, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open NDSETS datasets under the root group */ - for (n = 0; n < NDSETS; n++) { - snprintf(dname, sizeof(dname), "dataset%d", n); - dset[n] = H5Dopen2(fid, dname, H5P_DEFAULT); - CHECK(dset[n], FAIL, "H5Dcreate2"); - } - - /* Close the file first */ - H5Fclose(fid); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) { - /* Get the number of all opened objects */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, NDSETS, "H5Fget_obj_count"); - - oid_list = (hid_t *)calloc((size_t)oid_count, sizeof(hid_t)); - CHECK_PTR(oid_list, "calloc"); - - /* Get the list of all opened objects */ - ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); - CHECK(ret_count, FAIL, "H5Fget_obj_ids"); - VERIFY(ret_count, NDSETS, "H5Fget_obj_ids"); - - H5E_BEGIN_TRY - { - /* Close all open objects with H5Oclose */ - for (n = 0; n < oid_count; n++) - H5Oclose(oid_list[n]); - } - H5E_END_TRY - - free(oid_list); - } -} - -/**************************************************************** -** -** test_get_file_id(): Test H5Iget_file_id() -** -*****************************************************************/ -static void -test_get_file_id(void) -{ -#if 0 - hid_t fid, fid2, fid3; - hid_t datatype_id, dataset_id, dataspace_id, group_id, attr_id; - hid_t plist; - hsize_t dims[F2_RANK]; - unsigned intent; - herr_t ret; -#endif - - MESSAGE(5, ("Testing H5Iget_file_id - SKIPPED for now due to no H5Iget_file_id support\n")); -#if 0 - /* Create a file */ - fid = H5Fcreate(FILE4, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Check the intent */ - ret = H5Fget_intent(fid, &intent); - CHECK(ret, FAIL, "H5Fget_intent"); - VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent"); - - /* Test H5Iget_file_id() */ - check_file_id(fid, fid); - - /* Create a group in the file. Make a duplicated file ID from the group. - * And close this duplicated ID - */ - group_id = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate2"); - - /* Test H5Iget_file_id() */ - check_file_id(fid, group_id); - - /* Close the file and get file ID from the group ID */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Test H5Iget_file_id() */ - check_file_id((hid_t)-1, group_id); - - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open the file again. Test H5Iget_file_id() */ - fid = H5Fopen(FILE4, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - group_id = H5Gopen2(fid, GRP_NAME, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gopen2"); - - /* Test H5Iget_file_id() */ - check_file_id(fid, group_id); - - /* Open the file for second time. Test H5Iget_file_id() */ - fid3 = H5Freopen(fid); - CHECK(fid3, FAIL, "H5Freopen"); - - /* Test H5Iget_file_id() */ - check_file_id(fid3, fid3); - - ret = H5Fclose(fid3); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a dataset in the group. Make a duplicated file ID from the - * dataset. And close this duplicated ID. 
- */ - dims[0] = F2_DIM0; - dims[1] = F2_DIM1; - dataspace_id = H5Screate_simple(F2_RANK, dims, NULL); - CHECK(dataspace_id, FAIL, "H5Screate_simple"); - - dataset_id = - H5Dcreate2(group_id, DSET_NAME, H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - /* Test H5Iget_file_id() */ - check_file_id(fid, dataset_id); - - /* Create an attribute for the dataset. Make a duplicated file ID from - * this attribute. And close it. - */ - attr_id = H5Acreate2(dataset_id, ATTR_NAME, H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Acreate2"); - - /* Test H5Iget_file_id() */ - check_file_id(fid, attr_id); - - /* Create a named datatype. Make a duplicated file ID from - * this attribute. And close it. - */ - datatype_id = H5Tcopy(H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tcopy"); - - ret = H5Tcommit2(fid, TYPE_NAME, datatype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Test H5Iget_file_id() */ - check_file_id(fid, datatype_id); - - /* Create a property list and try to get file ID from it. - * Supposed to fail. - */ - plist = H5Pcreate(H5P_FILE_ACCESS); - CHECK(plist, FAIL, "H5Pcreate"); - - H5E_BEGIN_TRY - { - fid2 = H5Iget_file_id(plist); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Iget_file_id"); - - /* Close objects */ - ret = H5Pclose(plist); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Tclose(datatype_id); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Sclose(dataspace_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -#endif -} - -/**************************************************************** -** -** check_file_id(): Internal function of test_get_file_id() -** -*****************************************************************/ -#if 0 -static void -check_file_id(hid_t fid, hid_t object_id) -{ - hid_t new_fid; - herr_t ret; - - /* Return a duplicated file ID even not expecting user to do it. - * And close this duplicated ID - */ - new_fid = H5Iget_file_id(object_id); - - if (fid >= 0) - VERIFY(new_fid, fid, "H5Iget_file_id"); - else - CHECK(new_fid, FAIL, "H5Iget_file_id"); - - ret = H5Fclose(new_fid); - CHECK(ret, FAIL, "H5Fclose"); -} -#endif - -/**************************************************************** -** -** test_obj_count_and_id(): test object count and ID list functions. 
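As background, and not part of this patch, the same two calls can also be scoped across every open file by passing H5F_OBJ_ALL in place of a file ID, and filtered by object type; a rough sketch with an illustrative helper name:

    #include <stdio.h>
    #include "hdf5.h"

    /* Library-wide bookkeeping: count open IDs by category */
    static void report_open_ids(void)
    {
        ssize_t nfiles = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE);
        ssize_t ndsets = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET);
        ssize_t ngrps  = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP);
        printf("files: %zd  datasets: %zd  groups: %zd\n", nfiles, ndsets, ngrps);
    }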
-** -****************************************************************/ -#if 0 -static void -test_obj_count_and_id(hid_t fid1, hid_t fid2, hid_t did, hid_t gid1, hid_t gid2, hid_t gid3) -{ - hid_t fid3, fid4; - ssize_t oid_count, ret_count; - herr_t ret; - - /* Create two new files */ - fid3 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid3, FAIL, "H5Fcreate"); - fid4 = H5Fcreate(FILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid4, FAIL, "H5Fcreate"); - - /* test object count of all files IDs open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); - - /* test object count of all datasets open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_1, "H5Fget_obj_count"); - - /* test object count of all groups open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_3, "H5Fget_obj_count"); - - /* test object count of all named datatypes open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); - - /* test object count of all attributes open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); - - /* test object count of all objects currently open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_8, "H5Fget_obj_count"); - - if (oid_count > 0) { - hid_t *oid_list; - - oid_list = (hid_t *)calloc((size_t)oid_count, sizeof(hid_t)); - if (oid_list != NULL) { - int i; - - ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); - CHECK(ret_count, FAIL, "H5Fget_obj_ids"); - - for (i = 0; i < oid_count; i++) { - H5I_type_t id_type; - - id_type = H5Iget_type(oid_list[i]); - switch (id_type) { - case H5I_FILE: - if (oid_list[i] != fid1 && oid_list[i] != fid2 && oid_list[i] != fid3 && - oid_list[i] != fid4) - ERROR("H5Fget_obj_ids"); - break; - - case H5I_GROUP: - if (oid_list[i] != gid1 && oid_list[i] != gid2 && oid_list[i] != gid3) - ERROR("H5Fget_obj_ids"); - break; - - case H5I_DATASET: - VERIFY(oid_list[i], did, "H5Fget_obj_ids"); - break; - - case H5I_MAP: - /* TODO: Not supported in native VOL connector yet */ - - case H5I_UNINIT: - case H5I_BADID: - case H5I_DATATYPE: - case H5I_DATASPACE: - case H5I_ATTR: - case H5I_VFL: - case H5I_VOL: - case H5I_GENPROP_CLS: - case H5I_GENPROP_LST: - case H5I_ERROR_CLASS: - case H5I_ERROR_MSG: - case H5I_ERROR_STACK: - case H5I_SPACE_SEL_ITER: - case H5I_EVENTSET: - case H5I_NTYPES: - default: - ERROR("H5Fget_obj_ids"); - } /* end switch */ - } /* end for */ - - free(oid_list); - } /* end if */ - } /* end if */ - - /* close the two new files */ - ret = H5Fclose(fid3); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(fid4); - CHECK(ret, FAIL, "H5Fclose"); -} -#endif - -/**************************************************************** -** -** test_file_perm(): low-level file test routine. -** This test verifies that a file can be opened for both -** read-only and read-write access and things will be handled -** appropriately. 
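A rough sketch of the access-mode check involved here, assuming only the public H5Fopen/H5Fget_intent API; the helper is illustrative, not part of the test:

    #include "hdf5.h"

    /* Open read-only and confirm the handle carries no write intent;
     * object creation through such a handle is expected to fail. */
    static int opened_read_only(const char *name)
    {
        hid_t    fid    = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
        unsigned intent = 0;
        int      ro     = 0;
        if (fid >= 0) {
            if (H5Fget_intent(fid, &intent) >= 0)
                ro = !(intent & H5F_ACC_RDWR);
            H5Fclose(fid);
        }
        return ro;
    }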
-** -*****************************************************************/ -static void -test_file_perm(void) -{ - hid_t file; /* File opened with read-write permission */ - hid_t filero; /* Same file opened with read-only permission */ - hid_t dspace; /* Dataspace ID */ - hid_t dset; /* Dataset ID */ - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Low-Level File Permissions\n")); - - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); - - /* Create the file (with read-write permission) */ - file = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create a dataset with the read-write file handle */ - dset = H5Dcreate2(file, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open the file (with read-only permission) */ - filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(filero, FAIL, "H5Fopen"); - - /* Create a dataset with the read-only file handle (should fail) */ - H5E_BEGIN_TRY - { - dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(dset, FAIL, "H5Dcreate2"); - if (dset != FAIL) { - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end if */ - - ret = H5Fclose(filero); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - -} /* end test_file_perm() */ - -/**************************************************************** -** -** test_file_perm2(): low-level file test routine. -** This test verifies that no object can be created in a -** file that is opened for read-only. 
-** -*****************************************************************/ -static void -test_file_perm2(void) -{ - hid_t file; /* File opened with read-write permission */ - hid_t filero; /* Same file opened with read-only permission */ - hid_t dspace; /* Dataspace ID */ - hid_t group; /* Group ID */ - hid_t dset; /* Dataset ID */ - hid_t type; /* Datatype ID */ - hid_t attr; /* Attribute ID */ - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Low-Level File Permissions again\n")); - - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); - - /* Create the file (with read-write permission) */ - file = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file (with read-only permission) */ - filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(filero, FAIL, "H5Fopen"); - - /* Create a group with the read-only file handle (should fail) */ - H5E_BEGIN_TRY - { - group = H5Gcreate2(filero, "MY_GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(group, FAIL, "H5Gcreate2"); - - /* Create a dataset with the read-only file handle (should fail) */ - H5E_BEGIN_TRY - { - dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(dset, FAIL, "H5Dcreate2"); - - /* Create an attribute with the read-only file handle (should fail) */ - H5E_BEGIN_TRY - { - attr = H5Acreate2(filero, "MY_ATTR", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(attr, FAIL, "H5Acreate2"); - - type = H5Tcopy(H5T_NATIVE_SHORT); - CHECK(type, FAIL, "H5Tcopy"); - - /* Commit a datatype with the read-only file handle (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Tcommit2(filero, "MY_DTYPE", type, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Tcommit2"); - - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Fclose(filero); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); -} /* end test_file_perm2() */ - -/**************************************************************** -** -** test_file_is_accessible(): low-level file test routine. -** Clone of test_file_ishdf5 but uses the newer VOL-enabled -** H5Fis_accessible() API call. 
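A minimal sketch of the call being exercised, not taken from this patch; note that it accepts a FAPL, so it also works with non-default VFDs and VOL connectors:

    #include <stdio.h>
    #include "hdf5.h"

    static void probe_container(const char *name, hid_t fapl_id)
    {
        htri_t status = H5Fis_accessible(name, fapl_id);
        if (status > 0)
            printf("%s: accessible HDF5 container\n", name);
        else if (status == 0)
            printf("%s: exists but is not HDF5\n", name);
        else
            printf("%s: could not be checked\n", name);
    }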
-** -*****************************************************************/ -#define FILE_IS_ACCESSIBLE "tfile_is_accessible" -#define FILE_IS_ACCESSIBLE_NON_HDF5 "tfile_is_accessible_non_hdf5" -static void -test_file_is_accessible(const char *env_h5_drvr) -{ - hid_t fid = H5I_INVALID_HID; /* File opened with read-write permission */ - hid_t fcpl_id = H5I_INVALID_HID; /* File creation property list */ - hid_t fapl_id = H5I_INVALID_HID; /* File access property list */ -#if 0 - int fd; /* POSIX file descriptor */ -#endif - char filename[FILENAME_LEN]; /* Filename to use */ - char non_hdf5_filename[FILENAME_LEN]; /* Base name of non-hdf5 file */ - char non_hdf5_sb_filename[FILENAME_LEN]; /* Name of non-hdf5 superblock file */ -#if 0 - ssize_t nbytes; /* Number of bytes written */ - unsigned u; /* Local index variable */ - unsigned char buf[1024]; /* Buffer of data to write */ -#endif - htri_t is_hdf5; /* Whether a file is an HDF5 file */ -#if 0 - int posix_ret; /* Return value from POSIX calls */ -#endif - bool driver_is_default_compatible; - herr_t ret; /* Return value from HDF5 calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Detection of HDF5 Files\n")); - - /* Get FAPL */ - fapl_id = h5_fileaccess(); - CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate"); - - if (h5_driver_is_default_vfd_compatible(fapl_id, &driver_is_default_compatible) < 0) { - TestErrPrintf("Can't check if VFD is compatible with default VFD"); - return; - } - - /* Fix up filenames */ - h5_fixname(FILE_IS_ACCESSIBLE, fapl_id, filename, sizeof(filename)); - h5_fixname(FILE_IS_ACCESSIBLE_NON_HDF5, fapl_id, non_hdf5_filename, sizeof(non_hdf5_filename)); - h5_fixname_superblock(FILE_IS_ACCESSIBLE_NON_HDF5, fapl_id, non_hdf5_sb_filename, - sizeof(non_hdf5_sb_filename)); - - /****************/ - /* Normal usage */ - /****************/ - - /* Create a file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_accessible(filename, fapl_id); - VERIFY(is_hdf5, true, "H5Fis_accessible"); - - /*****************************************/ - /* Newly created file that is still open */ - /*****************************************/ - - /* On Windows, file locking is mandatory so this check ensures that - * H5Fis_accessible() works on files that have an exclusive lock. - * Previous versions of this API call created an additional file handle - * and attempted to read through it, which will not work when locks - * are enforced by the OS. - */ - - /* Create a file and hold it open */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_accessible(filename, fapl_id); - VERIFY(is_hdf5, true, "H5Fis_accessible"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*******************************/ - /* Non-default user block size */ - /*******************************/ - - /* This test is not currently working for the family VFD. - * There are failures when creating files with userblocks. 
- */ - if (0 != strcmp(env_h5_drvr, "family")) { - /* Create a file creation property list with a non-default user block size */ - fcpl_id = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate"); - - ret = H5Pset_userblock(fcpl_id, (hsize_t)2048); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file with non-default user block */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Release file-creation property list */ - ret = H5Pclose(fcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_accessible(filename, fapl_id); - VERIFY(is_hdf5, true, "H5Fis_accessible"); - } /* end if */ -#if 0 - if (driver_is_default_compatible) { - /***********************/ - /* EMPTY non-HDF5 file */ - /***********************/ - - /* Create non-HDF5 file and check it */ - fd = HDopen(non_hdf5_sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); - CHECK(fd, (-1), "HDopen"); - - /* Close the file */ - posix_ret = HDclose(fd); - CHECK(posix_ret, (-1), "HDclose"); - - /* Verify that the file is NOT an HDF5 file using the base filename */ - is_hdf5 = H5Fis_accessible(non_hdf5_filename, fapl_id); - VERIFY(is_hdf5, false, "H5Fis_accessible (empty non-HDF5 file)"); - - /***************************/ - /* Non-empty non-HDF5 file */ - /***************************/ - - /* Create non-HDF5 file and check it */ - fd = HDopen(non_hdf5_sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); - CHECK(fd, (-1), "HDopen"); - - /* Initialize information to write */ - for (u = 0; u < 1024; u++) - buf[u] = (unsigned char)u; - - /* Write some information */ - nbytes = HDwrite(fd, buf, (size_t)1024); - VERIFY(nbytes, 1024, "HDwrite"); - - /* Close the file */ - posix_ret = HDclose(fd); - CHECK(posix_ret, (-1), "HDclose"); - - /* Verify that the file is not an HDF5 file */ - is_hdf5 = H5Fis_accessible(non_hdf5_filename, fapl_id); - VERIFY(is_hdf5, false, "H5Fis_accessible (non-HDF5 file)"); - } - - /* Clean up files */ - h5_delete_test_file(filename, fapl_id); - h5_delete_test_file(non_hdf5_filename, fapl_id); -#endif - H5Fdelete(filename, fapl_id); - - /* Close property list */ - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_file_is_accessible() */ - -/**************************************************************** -** -** test_file_ishdf5(): low-level file test routine. -** This test checks whether the H5Fis_hdf5() routine is working -** correctly in various situations. 
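For comparison, the older call tested below takes no FAPL and only understands the native file format, which is why new code should prefer H5Fis_accessible(); the snippet is illustrative only and assumes deprecated symbols are enabled in the build:

    #include "hdf5.h"

    static htri_t old_style_check(const char *name)
    {
        return H5Fis_hdf5(name); /* deprecated in favor of H5Fis_accessible() */
    }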
-** -*****************************************************************/ -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS -static void -test_file_ishdf5(const char *env_h5_drvr) -{ - hid_t fid = H5I_INVALID_HID; /* File opened with read-write permission */ - hid_t fcpl_id = H5I_INVALID_HID; /* File creation property list */ - hid_t fapl_id = H5I_INVALID_HID; /* File access property list */ - int fd; /* POSIX file descriptor */ - char filename[FILENAME_LEN]; /* Filename to use */ - char sb_filename[FILENAME_LEN]; /* Name of file w/ superblock */ - ssize_t nbytes; /* Number of bytes written */ - unsigned u; /* Local index variable */ - unsigned char buf[1024]; /* Buffer of data to write */ - htri_t is_hdf5; /* Whether a file is an HDF5 file */ - int posix_ret; /* Return value from POSIX calls */ - herr_t ret; /* Return value from HDF5 calls */ - - if (!h5_using_default_driver(env_h5_drvr)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Detection of HDF5 Files (using deprecated H5Fis_hdf5() call)\n")); - - /* Get FAPL */ - fapl_id = h5_fileaccess(); - CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Fix up filenames - * For VFDs that create multiple files, we also need the name - * of the file with the superblock. With single-file VFDs, this - * will be equal to the one from h5_fixname(). - */ - h5_fixname(FILE_IS_ACCESSIBLE, fapl_id, filename, sizeof(filename)); - h5_fixname_superblock(FILE_IS_ACCESSIBLE, fapl_id, sb_filename, sizeof(filename)); - - /****************/ - /* Normal usage */ - /****************/ - - /* Create a file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_hdf5(sb_filename); - VERIFY(is_hdf5, true, "H5Fis_hdf5"); - - /*******************************/ - /* Non-default user block size */ - /*******************************/ - - /* Create a file creation property list with a non-default user block size */ - fcpl_id = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate"); - - ret = H5Pset_userblock(fcpl_id, (hsize_t)2048); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file with non-default user block */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Release file creation property list */ - ret = H5Pclose(fcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_hdf5(sb_filename); - VERIFY(is_hdf5, true, "H5Fis_hdf5"); - - /***************************/ - /* Non-empty non-HDF5 file */ - /***************************/ - - /* Create non-HDF5 file. Use the calculated superblock - * filename to avoid the format strings that will make - * open(2) sad. 
- */ - fd = HDopen(sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); - CHECK(fd, (-1), "HDopen"); - - /* Initialize information to write */ - for (u = 0; u < 1024; u++) - buf[u] = (unsigned char)u; - - /* Write some information */ - nbytes = HDwrite(fd, buf, (size_t)1024); - VERIFY(nbytes, 1024, "HDwrite"); - - /* Close the file */ - posix_ret = HDclose(fd); - CHECK(posix_ret, (-1), "HDclose"); - - /* Verify that the file is not an HDF5 file */ - is_hdf5 = H5Fis_hdf5(sb_filename); - VERIFY(is_hdf5, false, "H5Fis_hdf5"); - - /* Clean up files */ -#if 0 - h5_delete_test_file(filename, fapl_id); -#endif - H5Fdelete(filename, fapl_id); - - /* Close property list */ - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_file_ishdf5() */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - -/**************************************************************** -** -** test_file_delete(): tests H5Fdelete for all VFDs -** -*****************************************************************/ -#define FILE_DELETE "test_file_delete.h5" -#define FILE_DELETE_NOT_HDF5 "test_file_delete_not_hdf5" -static void -test_file_delete(hid_t fapl_id) -{ - hid_t fid = H5I_INVALID_HID; /* File to be deleted */ - char filename[FILENAME_LEN]; /* Filename to use */ - htri_t is_hdf5; /* Whether a file is an HDF5 file */ -#if 0 - int fd; /* POSIX file descriptor */ - int iret; -#endif - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deletion of HDF5 Files\n")); - - /*************/ - /* HDF5 FILE */ - /*************/ - - /* Get fapl-dependent filename */ - h5_fixname(FILE_DELETE, fapl_id, filename, sizeof(filename)); - - /* Create a file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Close file */ - ret = H5Fclose(fid); - VERIFY(ret, SUCCEED, "H5Fclose"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_accessible(filename, fapl_id); - VERIFY(is_hdf5, true, "H5Fis_accessible"); - - /* Delete the file */ - ret = H5Fdelete(filename, fapl_id); - VERIFY(ret, SUCCEED, "H5Fdelete"); - - /* Verify that the file is NO LONGER an HDF5 file */ - /* This should fail since there is no file */ - H5E_BEGIN_TRY - { - is_hdf5 = H5Fis_accessible(filename, fapl_id); - } - H5E_END_TRY - VERIFY(is_hdf5, FAIL, "H5Fis_accessible"); - -#if 0 - /* Just in case deletion fails - silent on errors */ - h5_delete_test_file(FILE_DELETE, fapl_id); - - /*****************/ - /* NON-HDF5 FILE */ - /*****************/ - - /* Get fapl-dependent filename */ - h5_fixname(FILE_DELETE_NOT_HDF5, fapl_id, filename, sizeof(filename)); - - /* Create a non-HDF5 file */ - fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); - CHECK_I(fd, "HDopen"); - - /* Close the file */ - ret = HDclose(fd); - VERIFY(ret, 0, "HDclose"); - - /* Verify that the file is not an HDF5 file */ - /* Note that you can get a FAIL result when h5_fixname() - * perturbs the filename as a file with that exact name - * may not have been created since we created it with - * open(2) and not the library. 
- */ - H5E_BEGIN_TRY - { - is_hdf5 = H5Fis_accessible(filename, fapl_id); - } - H5E_END_TRY - CHECK(is_hdf5, true, "H5Fis_accessible"); - - /* Try to delete it (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Fdelete(filename, fapl_id); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fdelete"); - - /* Delete the file */ - iret = H5Fdelete(filename, H5P_DEFAULT); - VERIFY(iret, 0, "H5Fdelete"); -#endif -} /* end test_file_delete() */ - -/**************************************************************** -** -** test_file_open_dot(): low-level file test routine. -** This test checks whether opening objects with "." for a name -** works correctly in various situations. -** -*****************************************************************/ -static void -test_file_open_dot(void) -{ - hid_t fid; /* File ID */ - hid_t gid, gid2; /* Group IDs */ - hid_t did; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid, tid2; /* Datatype IDs */ - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing opening objects with \".\" for a name\n")); - - /* Create a new HDF5 file to work with */ - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group in the HDF5 file */ - gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create a dataspace for creating datasets */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset with no name using the file ID */ - H5E_BEGIN_TRY - { - did = H5Dcreate2(fid, ".", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(did, FAIL, "H5Dcreate2"); - - /* Create a dataset with no name using the group ID */ - H5E_BEGIN_TRY - { - did = H5Dcreate2(gid, ".", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(did, FAIL, "H5Dcreate2"); - - /* Open a dataset with no name using the file ID */ - H5E_BEGIN_TRY - { - did = H5Dopen2(fid, ".", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(did, FAIL, "H5Dopen2"); - - /* Open a dataset with no name using the group ID */ - H5E_BEGIN_TRY - { - did = H5Dopen2(gid, ".", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(did, FAIL, "H5Dopen2"); - - /* Make a copy of a datatype to use for creating a named datatype */ - tid = H5Tcopy(H5T_NATIVE_INT); - CHECK(tid, FAIL, "H5Tcopy"); - - /* Create a named datatype with no name using the file ID */ - H5E_BEGIN_TRY - { - ret = H5Tcommit2(fid, ".", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Tcommit2"); - - /* Create a named datatype with no name using the group ID */ - H5E_BEGIN_TRY - { - ret = H5Tcommit2(gid, ".", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Tcommit2"); - - /* Open a named datatype with no name using the file ID */ - H5E_BEGIN_TRY - { - tid2 = H5Topen2(fid, ".", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tid2, FAIL, "H5Topen2"); - - /* Open a named datatype with no name using the group ID */ - H5E_BEGIN_TRY - { - tid2 = H5Topen2(gid, ".", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tid2, FAIL, "H5Topen2"); - - /* Create a group with no name using the file ID */ - H5E_BEGIN_TRY - { - gid2 = H5Gcreate2(fid, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(gid2, FAIL, "H5Gcreate2"); - - /* Create a group with no name using the group ID */ - H5E_BEGIN_TRY - { - gid2 = H5Gcreate2(gid, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - 
VERIFY(gid2, FAIL, "H5Gcreate2"); - - /* Open a group with no name using the file ID (should open the root group) */ - gid2 = H5Gopen2(fid, ".", H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open a group with no name using the group ID (should open the group again) */ - gid2 = H5Gopen2(gid, ".", H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close everything */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_file_open_dot() */ - -/**************************************************************** -** -** test_file_open_overlap(): low-level file test routine. -** This test checks whether opening files in an overlapping way -** (as opposed to a nested manner) works correctly. -** -*****************************************************************/ -static void -test_file_open_overlap(void) -{ - hid_t fid1, fid2; - hid_t did1, did2; - hid_t gid; - hid_t sid; - ssize_t nobjs; /* # of open objects */ - unsigned intent; -#if 0 - unsigned long fileno1, fileno2; /* File number */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing opening overlapping file opens\n")); - - /* Create file */ - fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Open file also */ - fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Check the intent */ - ret = H5Fget_intent(fid1, &intent); - CHECK(ret, FAIL, "H5Fget_intent"); - VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent"); -#if 0 - /* Check the file numbers */ - fileno1 = 0; - ret = H5Fget_fileno(fid1, &fileno1); - CHECK(ret, FAIL, "H5Fget_fileno"); - fileno2 = 0; - ret = H5Fget_fileno(fid2, &fileno2); - CHECK(ret, FAIL, "H5Fget_fileno"); - VERIFY(fileno1, fileno2, "H5Fget_fileno"); - - /* Check that a file number pointer of NULL is ignored */ - ret = H5Fget_fileno(fid1, NULL); - CHECK(ret, FAIL, "H5Fget_fileno"); -#endif - - /* Create a group in file */ - gid = H5Gcreate2(fid1, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create dataset in group w/first file ID */ - did1 = H5Dcreate2(gid, DSET1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dcreate2"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) { - /* Check number of objects opened in first file */ - nobjs = H5Fget_obj_count(fid1, H5F_OBJ_LOCAL | H5F_OBJ_ALL); - VERIFY(nobjs, 3, "H5Fget_obj_count"); /* 3 == file, dataset & group */ - } - - /* Close dataset */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close first file ID */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create dataset with second file ID */ - did2 = H5Dcreate2(fid2, DSET2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dcreate2"); - - /* Check number of objects opened in first file */ - nobjs = H5Fget_obj_count(fid2, H5F_OBJ_ALL); - VERIFY(nobjs, 2, "H5Fget_obj_count"); /* 3 == file & dataset */ - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close 
second dataset */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close second file */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_file_open_overlap() */ - -/**************************************************************** -** -** test_file_getname(): low-level file test routine. -** This test checks whether H5Fget_name works correctly. -** -*****************************************************************/ -static void -test_file_getname(void) -{ - /* Compound datatype */ - typedef struct s1_t { - unsigned int a; - float b; - } s1_t; - - hid_t file_id; - hid_t group_id; - hid_t dataset_id; - hid_t space_id; - hid_t type_id; - hid_t attr_id; - hsize_t dims[TESTA_RANK] = {TESTA_NX, TESTA_NY}; - char name[TESTA_NAME_BUF_SIZE]; - ssize_t name_len; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Fget_name() functionality\n")); - - /* Create a new file_id using default properties. */ - file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Get and verify file name */ - name_len = H5Fget_name(file_id, name, (size_t)TESTA_NAME_BUF_SIZE); - CHECK(name_len, FAIL, "H5Fget_name"); - VERIFY_STR(name, FILE1, "H5Fget_name"); - - /* Create a group in the root group */ - group_id = H5Gcreate2(file_id, TESTA_GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate2"); - - /* Get and verify file name */ - name_len = H5Fget_name(group_id, name, (size_t)TESTA_NAME_BUF_SIZE); - CHECK(name_len, FAIL, "H5Fget_name"); - VERIFY_STR(name, FILE1, "H5Fget_name"); - - /* Create the data space */ - space_id = H5Screate_simple(TESTA_RANK, dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - /* Try get file name from data space. Supposed to fail because - * it's illegal operation. 
*/ - H5E_BEGIN_TRY - { - name_len = H5Fget_name(space_id, name, (size_t)TESTA_NAME_BUF_SIZE); - } - H5E_END_TRY - VERIFY(name_len, FAIL, "H5Fget_name"); - - /* Create a new dataset */ - dataset_id = - H5Dcreate2(file_id, TESTA_DSETNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - /* Get and verify file name */ - name_len = H5Fget_name(dataset_id, name, (size_t)TESTA_NAME_BUF_SIZE); - CHECK(name_len, FAIL, "H5Fget_name"); - VERIFY_STR(name, FILE1, "H5Fget_name"); - - /* Create an attribute for the dataset */ - attr_id = H5Acreate2(dataset_id, TESTA_ATTRNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - - /* Get and verify file name */ - name_len = H5Fget_name(attr_id, name, (size_t)TESTA_NAME_BUF_SIZE); - CHECK(name_len, FAIL, "H5Fget_name"); - VERIFY_STR(name, FILE1, "H5Fget_name"); - - /* Create a compound datatype */ - type_id = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(type_id, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(type_id, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(type_id, "b", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save it on file */ - ret = H5Tcommit2(file_id, TESTA_DTYPENAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Get and verify file name */ - name_len = H5Fget_name(type_id, name, (size_t)TESTA_NAME_BUF_SIZE); - CHECK(name_len, FAIL, "H5Fget_name"); - VERIFY_STR(name, FILE1, "H5Fget_name"); - - /* Close things down */ - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_file_getname() */ - -/**************************************************************** -** -** test_file_double_root_open(): low-level file test routine. -** This test checks whether opening the root group from two -** different files works correctly. -** -*****************************************************************/ -static void -test_file_double_root_open(void) -{ - hid_t file1_id, file2_id; - hid_t grp1_id, grp2_id; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing double root group open\n")); - - file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1_id, FAIL, "H5Fcreate"); - file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file2_id, FAIL, "H5Fopen"); - - grp1_id = H5Gopen2(file1_id, "/", H5P_DEFAULT); - CHECK(grp1_id, FAIL, "H5Gopen2"); - grp2_id = H5Gopen2(file2_id, "/", H5P_DEFAULT); - CHECK(grp2_id, FAIL, "H5Gopen2"); - - /* Note "asymmetric" close order */ - ret = H5Gclose(grp1_id); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(grp2_id); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file1_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_file_double_root_open() */ - -/**************************************************************** -** -** test_file_double_group_open(): low-level file test routine. -** This test checks whether opening the same group from two -** different files works correctly. 
-** -*****************************************************************/ -static void -test_file_double_group_open(void) -{ - hid_t file1_id, file2_id; - hid_t grp1_id, grp2_id; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing double non-root group open\n")); - - file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1_id, FAIL, "H5Fcreate"); - file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file2_id, FAIL, "H5Fopen"); - - grp1_id = H5Gcreate2(file1_id, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp1_id, FAIL, "H5Gcreate2"); - grp2_id = H5Gopen2(file2_id, GRP_NAME, H5P_DEFAULT); - CHECK(grp2_id, FAIL, "H5Gopen2"); - - /* Note "asymmetric" close order */ - ret = H5Gclose(grp1_id); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(grp2_id); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file1_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_file_double_group_open() */ - -/**************************************************************** -** -** test_file_double_dataset_open(): low-level file test routine. -** This test checks whether opening the same dataset from two -** different files works correctly. -** -*****************************************************************/ -static void -test_file_double_dataset_open(void) -{ - hid_t file1_id, file2_id; - hid_t dset1_id, dset2_id; - hid_t space_id; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing double dataset open\n")); - - file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1_id, FAIL, "H5Fcreate"); - file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file2_id, FAIL, "H5Fopen"); - - /* Create dataspace for dataset */ - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - - dset1_id = - H5Dcreate2(file1_id, DSET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset1_id, FAIL, "H5Dcreate2"); - dset2_id = H5Dopen2(file2_id, DSET_NAME, H5P_DEFAULT); - CHECK(dset2_id, FAIL, "H5Dopen2"); - - /* Close "supporting" dataspace */ - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - /* Note "asymmetric" close order */ - ret = H5Dclose(dset1_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2_id); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Fclose(file1_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_file_double_dataset_open() */ - -/**************************************************************** -** -** test_file_double_file_dataset_open(): -** This test checks multi-opens of files & datasets: -** It simulates the multi-thread test program from DLS -** which exposes the file pointer segmentation fault failure. -** NOTE: The order on when the files and datasets are open/close -** is important. 
-** -*****************************************************************/ -static void -test_file_double_file_dataset_open(bool new_format) -{ - hid_t fapl = -1; /* File access property list */ - hid_t dcpl = -1; /* Dataset creation property list */ - hid_t fid1 = -1, fid2 = -1; /* File IDs */ - hid_t did1 = -1, did2 = -1; /* Dataset IDs */ - hid_t sid1 = -1, sid2 = -1; /* Dataspace IDs */ - hid_t tid1 = -1, tid2 = -1; /* Datatype IDs */ - hsize_t dims[1] = {5}, dims2[2] = {1, 4}; /* Dimension sizes */ - hsize_t e_ext_dims[1] = {7}; /* Expanded dimension sizes */ - hsize_t s_ext_dims[1] = {3}; /* Shrunk dimension sizes */ - hsize_t max_dims0[1] = {8}; /* Maximum dimension sizes */ - hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes for extensible array index */ - hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes for v2 B-tree index */ - hsize_t chunks[1] = {2}, chunks2[2] = {4, 5}; /* Chunk dimension sizes */ -#if 0 - hsize_t size; /* File size */ -#endif - char filename[FILENAME_LEN]; /* Filename to use */ - const char *data[] = {"String 1", "String 2", "String 3", "String 4", "String 5"}; /* Input Data */ - const char *e_data[] = {"String 1", "String 2", "String 3", "String 4", - "String 5", "String 6", "String 7"}; /* Input Data */ - char *buffer[5]; /* Output buffer */ - int wbuf[4] = {1, 2, 3, 4}; /* Input data */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing double file and dataset open/close\n")); - - /* Setting up test file */ - fapl = h5_fileaccess(); - CHECK(fapl, FAIL, "H5Pcreate"); - if (new_format) { - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - } /* end if */ - h5_fixname(FILE1, fapl, filename, sizeof filename); - - /* Create the test file */ - fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create a chunked dataset with fixed array indexing */ - sid1 = H5Screate_simple(1, dims, max_dims0); - CHECK(sid1, FAIL, "H5Screate_simple"); - tid1 = H5Tcopy(H5T_C_S1); - CHECK(tid1, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid1, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 1, chunks); - CHECK(ret, FAIL, "H5Pset_chunk"); - - did1 = H5Dcreate2(fid1, "dset_fa", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dcreate2"); - - /* Closing */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a chunked dataset with extensible array indexing */ - sid1 = H5Screate_simple(1, dims, max_dims1); - CHECK(sid1, FAIL, "H5Screate_simple"); - tid1 = H5Tcopy(H5T_C_S1); - CHECK(tid1, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid1, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 1, chunks); - CHECK(ret, FAIL, "H5Pset_chunk"); - - did1 = H5Dcreate2(fid1, "dset_ea", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dcreate2"); - - /* Write to the dataset */ - ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Closing */ - /* (Leave sid1 open for later use) */ - ret = H5Dclose(did1); - 
CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a chunked dataset with v2 btree indexing */ - sid2 = H5Screate_simple(2, dims2, max_dims2); - CHECK(sid2, FAIL, "H5Screate_simple"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 2, chunks2); - CHECK(ret, FAIL, "H5Pset_chunk"); - - did2 = H5Dcreate2(fid1, "dset_bt2", H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dcreate2"); - - /* Write to the dataset */ - ret = H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Closing */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* - * Scenario 1 - */ - - /* First file open */ - fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* First file's dataset open */ - did1 = H5Dopen2(fid1, "/dset_fa", H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dopen2"); - - tid1 = H5Tcopy(did1); - CHECK(tid1, FAIL, "H5Tcopy"); - - /* First file's dataset write */ - ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Second file open */ - fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Second file's dataset open */ - did2 = H5Dopen2(fid2, "/dset_fa", H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dopen2"); - - tid2 = H5Tcopy(did2); - CHECK(tid2, FAIL, "H5Tcopy"); - - /* First file's dataset close */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - - /* First file close */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Second file's dataset write */ - ret = H5Dwrite(did2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Second file's dataset close */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Second file close */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Closing */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* - * Scenario 2 - */ - - /* First file open */ - fid1 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Second file open */ - fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Second file's dataset open */ - did2 = H5Dopen2(fid2, "/dset_ea", H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dopen2"); - - tid2 = H5Tcopy(did2); - CHECK(tid2, FAIL, "H5Tcopy"); - - /* First file's dataset open */ - did1 = H5Dopen2(fid1, "/dset_ea", H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dopen2"); - - tid1 = H5Tcopy(did1); - CHECK(tid1, FAIL, "H5Tcopy"); - - /* Second file's dataset read */ - memset(buffer, 0, sizeof(char *) * 5); - ret = H5Dread(did2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer); - CHECK(ret, FAIL, "H5Dread"); - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, buffer); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Second file's dataset close */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Second file close */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* First file's dataset read */ - memset(buffer, 0, sizeof(char *) * 5); - ret = H5Dread(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer); - CHECK(ret, FAIL, "H5Dread"); - ret = H5Treclaim(tid2, sid1, 
H5P_DEFAULT, buffer); - CHECK(ret, FAIL, "H5Treclaim"); - - /* First file's dataset close */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - - /* First file close */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Closing */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* - * Scenario 3 - */ - - /* First file open */ - fid1 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* First file's dataset open */ - did1 = H5Dopen2(fid1, "/dset_bt2", H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dopen2"); -#if 0 - /* First file's get storage size */ - size = H5Dget_storage_size(did1); - CHECK(size, 0, "H5Dget_storage_size"); -#endif - /* Second file open */ - fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Second file's dataset open */ - did2 = H5Dopen2(fid2, "/dset_bt2", H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dopen2"); - - /* First file's dataset close */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - - /* First file close */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Second file's get storage size */ - size = H5Dget_storage_size(did2); - CHECK(size, 0, "H5Dget_storage_size"); -#endif - /* Second file's dataset close */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Second file close */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* - * Scenario 4 - * --trigger H5AC_protect: Assertion `f->shared' failed - * from second call to - * H5Dset_extent->...H5D__earray_idx_remove->H5EA_get...H5EA__iblock_protect...H5AC_protect - */ - /* First file open */ - fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl); - CHECK(fid1, FAIL, "H5Fopen"); - - /* First file's dataset open */ - did1 = H5Dopen2(fid1, "/dset_ea", H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dopen2"); - - tid1 = H5Tcopy(did1); - CHECK(tid1, FAIL, "H5Tcopy"); - - /* Extend the dataset */ - ret = H5Dset_extent(did1, e_ext_dims); - CHECK(ret, FAIL, "H5Dset_extent"); - - /* Write to the dataset */ - ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, e_data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Second file open */ - fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Second file's dataset open */ - did2 = H5Dopen2(fid2, "/dset_ea", H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dopen2"); - - /* First file's dataset close */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Dclose"); - - /* First file close */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Shrink the dataset */ - ret = H5Dset_extent(did2, s_ext_dims); - CHECK(ret, FAIL, "H5Dset_extent"); - - /* Second file's dataset close */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Second file close */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close the data type */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close FAPL */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); -} /* end test_file_double_dataset_open() */ - -/**************************************************************** -** -** test_file_double_datatype_open(): low-level file test routine. -** This test checks whether opening the same named datatype from two -** different files works correctly. 
-**
-*****************************************************************/
-static void
-test_file_double_datatype_open(void)
-{
- hid_t file1_id, file2_id;
- hid_t type1_id, type2_id;
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(5, ("Testing double datatype open\n"));
-
- file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- CHECK(file1_id, FAIL, "H5Fcreate");
- file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
- CHECK(file2_id, FAIL, "H5Fopen");
-
- type1_id = H5Tcopy(H5T_NATIVE_INT);
- CHECK(type1_id, FAIL, "H5Tcopy");
- ret = H5Tcommit2(file1_id, TYPE_NAME, type1_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- CHECK(ret, FAIL, "H5Tcommit2");
- type2_id = H5Topen2(file2_id, TYPE_NAME, H5P_DEFAULT);
- CHECK(type2_id, FAIL, "H5Topen2");
-
- /* Note "asymmetric" close order */
- ret = H5Tclose(type1_id);
- CHECK(ret, FAIL, "H5Tclose");
- ret = H5Tclose(type2_id);
- CHECK(ret, FAIL, "H5Tclose");
-
- ret = H5Fclose(file1_id);
- CHECK(ret, FAIL, "H5Fclose");
- ret = H5Fclose(file2_id);
- CHECK(ret, FAIL, "H5Fclose");
-} /* end test_file_double_datatype_open() */
-
-/****************************************************************
-**
-** test_userblock_file_size(): low-level file test routine.
-** This test checks that the presence of a userblock
-** affects the file size in the expected manner, and that
-** the filesize is not changed by reopening the file. It
-** creates two files which are identical except that one
-** contains a userblock, and verifies that their file sizes
-** differ exactly by the userblock size.
-**
-*****************************************************************/
-#if 0
-static void
-test_userblock_file_size(const char *env_h5_drvr)
-{
- hid_t file1_id, file2_id;
- hid_t group1_id, group2_id;
- hid_t dset1_id, dset2_id;
- hid_t space_id;
- hid_t fcpl2_id;
- hsize_t dims[2] = {3, 4};
-#if 0
- hsize_t filesize1, filesize2, filesize;
- unsigned long fileno1, fileno2; /* File number */
-#endif
- herr_t ret; /* Generic return value */
-
- /* Don't run with multi/split, family or direct drivers */
- if (!strcmp(env_h5_drvr, "multi") || !strcmp(env_h5_drvr, "split") ||
- !strcmp(env_h5_drvr, "family") || !strcmp(env_h5_drvr, "direct"))
- return;
-
- /* Output message about test being performed */
- MESSAGE(5, ("Testing file size with user block\n"));
-
- /* Create property list with userblock size set */
- fcpl2_id = H5Pcreate(H5P_FILE_CREATE);
- CHECK(fcpl2_id, FAIL, "H5Pcreate");
- ret = H5Pset_userblock(fcpl2_id, USERBLOCK_SIZE);
- CHECK(ret, FAIL, "H5Pset_userblock");
-
- /* Create files. Only file2 will have a userblock. 
*/ - file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1_id, FAIL, "H5Fcreate"); - file2_id = H5Fcreate(FILE2, H5F_ACC_TRUNC, fcpl2_id, H5P_DEFAULT); - CHECK(file2_id, FAIL, "H5Fcreate"); -#if 0 - /* Check the file numbers */ - fileno1 = 0; - ret = H5Fget_fileno(file1_id, &fileno1); - CHECK(ret, FAIL, "H5Fget_fileno"); - fileno2 = 0; - ret = H5Fget_fileno(file2_id, &fileno2); - CHECK(ret, FAIL, "H5Fget_fileno"); - CHECK(fileno1, fileno2, "H5Fget_fileno"); -#endif - /* Create groups */ - group1_id = H5Gcreate2(file1_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group1_id, FAIL, "H5Gcreate2"); - group2_id = H5Gcreate2(file2_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group2_id, FAIL, "H5Gcreate2"); - - /* Create dataspace */ - space_id = H5Screate_simple(2, dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - /* Create datasets */ - dset1_id = H5Dcreate2(file1_id, DSET2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset1_id, FAIL, "H5Dcreate2"); - dset2_id = H5Dcreate2(file2_id, DSET2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset2_id, FAIL, "H5Dcreate2"); - - /* Close IDs */ - ret = H5Dclose(dset1_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(dset2_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(group1_id); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(group2_id); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Pclose(fcpl2_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close files */ - ret = H5Fclose(file1_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Reopen files */ - file1_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file1_id, FAIL, "H5Fopen"); - file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file2_id, FAIL, "H5Fopen"); -#if 0 - /* Check file sizes */ - ret = H5Fget_filesize(file1_id, &filesize1); - CHECK(ret, FAIL, "H5Fget_filesize"); - ret = H5Fget_filesize(file2_id, &filesize2); - CHECK(ret, FAIL, "H5Fget_filesize"); - - /* Verify that the file sizes differ exactly by the userblock size */ - VERIFY_TYPE((unsigned long long)filesize2, (unsigned long long)(filesize1 + USERBLOCK_SIZE), - unsigned long long, "%llu", "H5Fget_filesize"); -#endif - /* Close files */ - ret = H5Fclose(file1_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Reopen files */ - file1_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file1_id, FAIL, "H5Fopen"); - file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file2_id, FAIL, "H5Fopen"); -#if 0 - /* Verify file sizes did not change */ - ret = H5Fget_filesize(file1_id, &filesize); - CHECK(ret, FAIL, "H5Fget_filesize"); - VERIFY(filesize, filesize1, "H5Fget_filesize"); - ret = H5Fget_filesize(file2_id, &filesize); - CHECK(ret, FAIL, "H5Fget_filesize"); - VERIFY(filesize, filesize2, "H5Fget_filesize"); -#endif - /* Close files */ - ret = H5Fclose(file1_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_userblock_file_size() */ -#endif - -/**************************************************************** -** -** test_cached_stab_info(): low-level file test routine. -** This test checks that new files are created with cached -** symbol table information in the superblock (when using -** the old format). 
This is necessary to ensure backwards -** compatibility with versions from 1.3.0 to 1.6.3. -** -*****************************************************************/ -#if 0 -static void -test_cached_stab_info(void) -{ - hid_t file_id; - hid_t group_id; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing cached symbol table information\n")); - - /* Create file */ - file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Create group */ - group_id = H5Gcreate2(file_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate2"); - - /* Close file and group */ - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Reopen file */ - file_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); -#if 0 - /* Verify the cached symbol table information */ - ret = H5F__check_cached_stab_test(file_id); - CHECK(ret, FAIL, "H5F__check_cached_stab_test"); -#endif - /* Close file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_cached_stab_info() */ -#endif - -#if 0 -/* - * To calculate the checksum for a file. - * This is a helper routine for test_rw_noupdate(). - */ -static int -cal_chksum(const char *file, uint32_t *chksum) -{ - int curr_num_errs = nerrors; /* Retrieve the current # of errors */ - int fdes = -1; /* File descriptor */ - void *file_data = NULL; /* Copy of file data */ - ssize_t bytes_read; /* # of bytes read */ - h5_stat_t sb; /* Stat buffer for file */ - herr_t ret; /* Generic return value */ - - /* Open the file */ - fdes = HDopen(file, O_RDONLY); - CHECK(fdes, FAIL, "HDopen"); - - /* Retrieve the file's size */ - ret = HDfstat(fdes, &sb); - CHECK(fdes, FAIL, "HDfstat"); - - /* Allocate space for the file data */ - file_data = malloc((size_t)sb.st_size); - CHECK_PTR(file_data, "malloc"); - - if (file_data) { - /* Read file's data into memory */ - bytes_read = HDread(fdes, file_data, (size_t)sb.st_size); - CHECK(bytes_read == sb.st_size, false, "malloc"); - - /* Calculate checksum */ - *chksum = H5_checksum_lookup3(file_data, sizeof(file_data), 0); - - /* Free memory */ - free(file_data); - } - - /* Close the file */ - ret = HDclose(fdes); - CHECK(ret, FAIL, "HDclose"); - - return ((nerrors == curr_num_errs) ? 0 : -1); -} /* cal_chksum() */ -#endif - -/**************************************************************** -** -** test_rw_noupdate(): low-level file test routine. -** This test checks to ensure that opening and closing a file -** with read/write permissions does not write anything to the -** file if the file does not change. -** Due to the implementation of file locking (status_flags in -** the superblock is used), this test is changed to use checksum -** instead of timestamp to verify the file is not changed. 
-** -*****************************************************************/ -#if 0 -static void -test_rw_noupdate(void) -{ - herr_t ret; /* Generic return value */ - hid_t fid; /* File ID */ - uint32_t chksum1, chksum2; /* Checksum value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing to verify that nothing is written if nothing is changed.\n")); - - /* Create and Close a HDF5 File */ - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Calculate checksum for the file */ - ret = cal_chksum(FILE1, &chksum1); - CHECK(ret, FAIL, "cal_chksum"); - - /* Open and close File With Read/Write Permission */ - fid = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Calculate checksum for the file */ - ret = cal_chksum(FILE1, &chksum2); - CHECK(ret, FAIL, "cal_chksum"); - - /* The two checksums are the same, i.e. the file is not changed */ - VERIFY(chksum1, chksum2, "Checksum"); - -} /* end test_rw_noupdate() */ -#endif - -/**************************************************************** -** -** test_userblock_alignment_helper1(): helper routine for -** test_userblock_alignment() test, to handle common testing -** -*****************************************************************/ -#if 0 -static int -test_userblock_alignment_helper1(hid_t fcpl, hid_t fapl) -{ - hid_t fid; /* File ID */ - int curr_num_errs = nerrors(); /* Retrieve the current # of errors */ - herr_t ret; /* Generic return value */ - - /* Create a file with FAPL & FCPL */ - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Only proceed further if file ID is OK */ - if (fid > 0) { - hid_t gid; /* Group ID */ - hid_t sid; /* Dataspace ID */ - hid_t did; /* Dataset ID */ - int val = 2; /* Dataset value */ - - /* Create a group */ - gid = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create a dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - did = H5Dcreate2(gid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Write value to dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end if */ - - return ((nerrors == curr_num_errs) ? 0 : -1); -} /* end test_userblock_alignment_helper1() */ - -/**************************************************************** -** -** test_userblock_alignment_helper2(): helper routine for -** test_userblock_alignment() test, to handle common testing -** -*****************************************************************/ -static int -test_userblock_alignment_helper2(hid_t fapl, bool open_rw) -{ - hid_t fid; /* File ID */ - int curr_num_errs = nerrors(); /* Retrieve the current # of errors */ - herr_t ret; /* Generic return value */ - - /* Re-open file */ - fid = H5Fopen(FILE1, (open_rw ? 
H5F_ACC_RDWR : H5F_ACC_RDONLY), fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Only proceed further if file ID is OK */ - if (fid > 0) { - hid_t gid; /* Group ID */ - hid_t did; /* Dataset ID */ - int val = -1; /* Dataset value */ - - /* Open group */ - gid = H5Gopen2(fid, "group1", H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Open dataset */ - did = H5Dopen2(gid, "dataset", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Read value from dataset */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); - CHECK(ret, FAIL, "H5Dread"); - VERIFY(val, 2, "H5Dread"); - - /* Close dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Only create new objects if file is open R/W */ - if (open_rw) { - hid_t gid2; /* Group ID */ - - /* Create a new group */ - gid2 = H5Gcreate2(gid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Close new group */ - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - } /* end if */ - - /* Close group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end if */ - - return ((nerrors == curr_num_errs) ? 0 : -1); -} /* end test_userblock_alignment_helper2() */ - -/**************************************************************** -** -** test_userblock_alignment(): low-level file test routine. -** This test checks to ensure that files with both a userblock and a -** object [allocation] alignment size set interact properly. -** -*****************************************************************/ -static void -test_userblock_alignment(const char *env_h5_drvr) -{ - hid_t fid; /* File ID */ - hid_t fcpl; /* File creation property list ID */ - hid_t fapl; /* File access property list ID */ - herr_t ret; /* Generic return value */ - - /* Only run with sec2 driver */ - if (!h5_using_default_driver(env_h5_drvr)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing that non-zero userblocks and object alignment interact correctly.\n")); - - /* Case 1: - * Userblock size = 0, alignment != 0 - * Outcome: - * Should succeed - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)0); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Case 2: - * Userblock size = 512, alignment = 16 - * (userblock is integral mult. 
of alignment) - * Outcome: - * Should succeed - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Case 3: - * Userblock size = 512, alignment = 512 - * (userblock is equal to alignment) - * Outcome: - * Should succeed - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Case 4: - * Userblock size = 512, alignment = 3 - * (userblock & alignment each individually valid, but userblock is - * non-integral multiple of alignment) - * Outcome: - * Should fail at file creation - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Create a file with FAPL & FCPL */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VERIFY(fid, FAIL, "H5Fcreate"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Case 5: - * Userblock size = 512, alignment = 1024 - * (userblock & alignment each individually valid, but userblock is - * less than alignment) - * Outcome: - * Should fail at file creation - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Create a file 
with FAPL & FCPL */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VERIFY(fid, FAIL, "H5Fcreate"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Case 6: - * File created with: - * Userblock size = 512, alignment = 512 - * File re-opened for read-only & read-write access with: - * Userblock size = 512, alignment = 1024 - * Outcome: - * Should succeed - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - - /* Change alignment in FAPL */ - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper2(fapl, false); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); -} /* end test_userblock_alignment() */ - -/**************************************************************** -** -** test_userblock_alignment_paged(): low-level file test routine. 
-** This test checks to ensure that files with both a userblock and -** alignment interact properly: -** -- alignment via H5Pset_alignment -** -- alignment via paged aggregation -** -*****************************************************************/ -static void -test_userblock_alignment_paged(const char *env_h5_drvr) -{ - hid_t fid; /* File ID */ - hid_t fcpl; /* File creation property list ID */ - hid_t fapl; /* File access property list ID */ - herr_t ret; /* Generic return value */ - - /* Only run with sec2 driver */ - if (!h5_using_default_driver(env_h5_drvr)) - return; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing interaction between userblock and alignment (via paged aggregation and " - "H5Pset_alignment)\n")); - - /* - * Case 1: - * Userblock size = 0 - * Alignment in use = 4096 - * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 4096 - * Outcome: - * Should succeed: - * userblock is 0 and alignment != 0 - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)0); - CHECK(ret, FAIL, "H5Pset_userblock"); - - /* Create file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set the "use the latest version of the format" bounds */ - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 2a: - * Userblock size = 1024 - * Alignment in use = 512 - * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512 - * H5Pset_alignment() is 3 - * Outcome: - * Should succeed: - * userblock (1024) is integral mult. 
of alignment (512) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); - - /* Create file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 2b: - * Userblock size = 1024 - * Alignment in use = 3 - * Strategy = H5F_FILE_SPACE_AGGR; fsp_size = 512 - * (via default file creation property) - * H5Pset_alignment() is 3 - * Outcome: - * Should fail at file creation: - * userblock (1024) is non-integral mult. of alignment (3) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); - - /* Create file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Create a file with FAPL & FCPL */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VERIFY(fid, FAIL, "H5Fcreate"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 3a: - * Userblock size = 512 - * Alignment in use = 512 - * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512 - * H5Pset_alignment() is 3 - * Outcome: - * Should succeed: - * userblock (512) is equal to alignment (512) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 3b: - * Userblock size = 512 - * Alignment in use = 3 - * 
Strategy is H5F_FILE_SPACE_NONE; fsp_size = 512 - * H5Pset_alignment() is 3 - * Outcome: - * Should fail at file creation: - * userblock (512) is non-integral mult. of alignment (3) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Create a file with FAPL & FCPL */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VERIFY(fid, FAIL, "H5Fcreate"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 4a: - * Userblock size = 1024 - * Alignment in use = 1023 - * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1023 - * H5Pset_alignment() is 16 - * Outcome: - * Should fail at file creation: - * userblock (1024) is non-integral multiple of alignment (1023) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Create a file with FAPL & FCPL */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VERIFY(fid, FAIL, "H5Fcreate"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 4b: - * Userblock size = 1024 - * Alignment in use = 16 - * Strategy is H5F_FILE_SPACE_FSM_AGGR; fsp_size = 1023 - * H5Pset_alignment() is 16 - * Outcome: - * Should succeed: - * userblock (512) is integral multiple of alignment (16) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, 
"test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 5a: - * Userblock size = 512 - * Alignment in use = 1024 - * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1024 - * H5Pset_alignment() is 16 - * Outcome: - * Should fail at file creation: - * userblock (512) is less than alignment (1024) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Create a file with FAPL & FCPL */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VERIFY(fid, FAIL, "H5Fcreate"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 5b: - * Userblock size = 512 - * Alignment in use = 16 - * Strategy is H5F_FILE_SPACE_NONE; fsp_size = 1024 - * H5Pset_alignment() is 16 - * Outcome: - * Should succeed: - * userblock (512) is integral multiple of alignment (16) - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case 6: - * Userblock size = 512 - * Alignment in use = 512 - * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512 - * H5Pset_alignment() is 3 - * Reopen the file; H5Pset_alignment() is 1024 - * Outcome: - * Should succeed: - * Userblock (512) is the same as alignment (512); - * The H5Pset_alignment() calls have no effect - */ - /* Create file creation property list with user block */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - ret = H5Pset_userblock(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_userblock"); - ret = H5Pset_file_space_strategy(fcpl, 
H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Create file access property list with alignment */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper1(fcpl, fapl); - CHECK(ret, FAIL, "test_userblock_alignment_helper1"); - - /* Change alignment in FAPL */ - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Call helper routines to perform file manipulations */ - ret = test_userblock_alignment_helper2(fapl, false); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - ret = test_userblock_alignment_helper2(fapl, true); - CHECK(ret, FAIL, "test_userblock_alignment_helper2"); - - /* Release property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_userblock_alignment_paged() */ -#endif - -/**************************************************************** -** -** test_filespace_info(): -** Verify the following public routines retrieve and set file space -** information correctly: -** (1) H5Pget/set_file_space_strategy(): -** Retrieve and set file space strategy, persisting free-space, -** and free-space section threshold as specified -** (2) H5Pget/set_file_space_page_size(): -** Retrieve and set the page size for paged aggregation -** -****************************************************************/ -#if 0 -static void -test_filespace_info(const char *env_h5_drvr) -{ - hid_t fid; /* File IDs */ - hid_t fapl, new_fapl; /* File access property lists */ - hid_t fcpl, fcpl1, fcpl2; /* File creation property lists */ - H5F_fspace_strategy_t strategy; /* File space strategy */ - bool persist; /* Persist free-space or not */ - hsize_t threshold; /* Free-space section threshold */ - unsigned new_format; /* New or old format */ - H5F_fspace_strategy_t fs_strategy; /* File space strategy--iteration variable */ - unsigned fs_persist; /* Persist free-space or not--iteration variable */ - hsize_t fs_threshold; /* Free-space section threshold--iteration variable */ - hsize_t fsp_size; /* File space page size */ - char filename[FILENAME_LEN]; /* Filename to use */ - bool contig_addr_vfd; /* Whether VFD used has a contiguous address space */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing file creation public routines: H5Pget/set_file_space_strategy & " - "H5Pget/set_file_space_page_size\n")); - - contig_addr_vfd = (bool)(strcmp(env_h5_drvr, "split") != 0 && strcmp(env_h5_drvr, "multi") != 0); - - fapl = h5_fileaccess(); - h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); - - /* Get a copy of the file access property list */ - new_fapl = H5Pcopy(fapl); - CHECK(new_fapl, FAIL, "H5Pcopy"); - - /* Set the "use the latest version of the format" bounds */ - ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* - * Case (1) - * Check file space information from a default file creation property list. 
- * Values expected: - * strategy--H5F_FILE_SPACE_AGGR - * persist--false - * threshold--1 - * file space page size--4096 - */ - /* Create file creation property list template */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Retrieve file space information */ - ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Verify file space information */ - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - - /* Retrieve file space page size */ - ret = H5Pget_file_space_page_size(fcpl, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size"); - - /* Close property list */ - H5Pclose(fcpl); - - /* - * Case (2) - * File space page size has a minimum size of 512. - * Setting value less than 512 will return an error; - * --setting file space page size to 0 - * --setting file space page size to 511 - * - * File space page size has a maximum size of 1 gigabyte. - * Setting value greater than 1 gigabyte will return an error. - */ - /* Create file creation property list template */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Setting to 0: should fail */ - H5E_BEGIN_TRY - { - ret = H5Pset_file_space_page_size(fcpl, 0); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Setting to 511: should fail */ - H5E_BEGIN_TRY - { - ret = H5Pset_file_space_page_size(fcpl, 511); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Setting to 1GB+1: should fail */ - H5E_BEGIN_TRY - { - ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G + 1); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_file_space_page_size"); - - /* Setting to 512: should succeed */ - ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - ret = H5Pget_file_space_page_size(fcpl, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size"); - - /* Setting to 1GB: should succeed */ - ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G); - CHECK(ret, FAIL, "H5Pset_file_space_page_size"); - ret = H5Pget_file_space_page_size(fcpl, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE1G, "H5Pget_file_space_page_size"); - - /* Close property list */ - H5Pclose(fcpl); - - /* - * Case (3) - * Check file space information when creating a file with default properties. 
- * Values expected: - * strategy--H5F_FILE_SPACE_AGGR - * persist--false - * threshold--1 - * file space page size--4096 - */ - /* Create a file with default file creation and access property lists */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Get the file's creation property list */ - fcpl1 = H5Fget_create_plist(fid); - CHECK(fcpl1, FAIL, "H5Fget_create_plist"); - - /* Retrieve file space information */ - ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Verify file space information */ - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - - /* Retrieve file space page size */ - ret = H5Pget_file_space_page_size(fcpl1, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size"); - - /* Close property lists */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Pclose(fcpl1); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case (4) - * Check file space information when creating a file with the - * latest library format and default properties. - * Values expected: - * strategy--H5F_FILE_SPACE_AGGR - * persist--false - * threshold--1 - * file space page size--4096 - */ - /* Create a file with the latest library format */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, new_fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Get the file's creation property */ - fcpl1 = H5Fget_create_plist(fid); - CHECK(fcpl1, FAIL, "H5Fget_create_plist"); - - /* Retrieve file space information */ - ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Verify file space information */ - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - - /* Retrieve file space page size */ - ret = H5Pget_file_space_page_size(fcpl1, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size"); - - /* Close property lists */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Pclose(fcpl1); - CHECK(ret, FAIL, "H5Pclose"); - - /* - * Case (5) - * Check file space information with the following combinations: - * Create file with -- - * New or old format - * Persist or not persist free-space - * Different sizes for free-space section threshold (0 to 10) - * The four file space strategies: - * H5F_FSPACE_STRATEGY_FSM_AGGR, H5F_FSPACE_STRATEGY_PAGE, - * H5F_FSPACE_STRATEGY_AGGR, H5F_FSPACE_STRATEGY_NONE - * File space page size: set to 512 - * - */ - for (new_format = false; new_format <= true; new_format++) { - hid_t my_fapl; - - /* Set the FAPL for the type of format */ - if (new_format) { - MESSAGE(5, ("Testing with new group format\n")); - my_fapl = new_fapl; - } /* end if */ - else { - MESSAGE(5, ("Testing with old group format\n")); - my_fapl = fapl; - } /* end else */ - - /* Test with true or false for persisting free-space */ - for (fs_persist = false; fs_persist <= true; fs_persist++) { - - /* Test with free-space section threshold size: 0 to 10 */ - for (fs_threshold = 0; fs_threshold <= TEST_THRESHOLD10; fs_threshold++) { - - /* Test with 4 file space strategies */ - for 
(fs_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_strategy < H5F_FSPACE_STRATEGY_NTYPES; - fs_strategy++) { - - if (!contig_addr_vfd && (fs_strategy == H5F_FSPACE_STRATEGY_PAGE || fs_persist)) - continue; - - /* Create file creation property list template */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Set file space information */ - ret = H5Pset_file_space_strategy(fcpl, fs_strategy, (bool)fs_persist, fs_threshold); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - - ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); - - /* Retrieve file space information */ - ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Verify file space information */ - VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy"); - - if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) { - VERIFY(persist, (bool)fs_persist, "H5Pget_file_space_strategy"); - VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy"); - } - else { - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - } - - /* Retrieve and verify file space page size */ - ret = H5Pget_file_space_page_size(fcpl, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size"); - - /* Create the file with the specified file space info */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Get the file's creation property */ - fcpl1 = H5Fget_create_plist(fid); - CHECK(fcpl1, FAIL, "H5Fget_create_plist"); - - /* Retrieve file space information */ - ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Verify file space information */ - VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy"); - - if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) { - VERIFY(persist, fs_persist, "H5Pget_file_space_strategy"); - VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy"); - } - else { - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - } - - /* Retrieve and verify file space page size */ - ret = H5Pget_file_space_page_size(fcpl1, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid = H5Fopen(filename, H5F_ACC_RDWR, my_fapl); - CHECK(ret, FAIL, "H5Fopen"); - - /* Get the file's creation property */ - fcpl2 = H5Fget_create_plist(fid); - CHECK(fcpl2, FAIL, "H5Fget_create_plist"); - - /* Retrieve file space information */ - ret = H5Pget_file_space_strategy(fcpl2, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Verify file space information */ - VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy"); - if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) { - VERIFY(persist, fs_persist, "H5Pget_file_space_strategy"); - VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy"); - } - else { - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - } - - /* Retrieve and verify file space page size */ - ret = H5Pget_file_space_page_size(fcpl2, &fsp_size); - CHECK(ret, FAIL, "H5Pget_file_space_page_size"); - VERIFY(fsp_size, FSP_SIZE512, 
"H5Pget_file_space_page_size"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Release file creation property lists */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fcpl1); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fcpl2); - CHECK(ret, FAIL, "H5Pclose"); - } /* end for file space strategy type */ - } /* end for free-space section threshold */ - } /* end for fs_persist */ - - /* close fapl_ and remove the file */ -#if 0 - h5_clean_files(FILESPACE_NAME, my_fapl); -#endif - - H5E_BEGIN_TRY - { - H5Fdelete(FILESPACE_NAME[0], my_fapl); - } - H5E_END_TRY - } /* end for new_format */ - -} /* test_filespace_info() */ -#endif - -/**************************************************************** -** -** set_multi_split(): -** Internal routine to set up page-aligned address space for multi/split driver -** when testing paged aggregation. -** This is used by test_file_freespace() and test_sects_freespace(). -** -*****************************************************************/ -#if 0 -static int -set_multi_split(hid_t fapl, hsize_t pagesize, bool split) -{ - H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; - hid_t memb_fapl_arr[H5FD_MEM_NTYPES]; - char *memb_name[H5FD_MEM_NTYPES]; - haddr_t memb_addr[H5FD_MEM_NTYPES]; - bool relax; - H5FD_mem_t mt; - - assert(split); - - memset(memb_name, 0, sizeof memb_name); - - /* Get current split settings */ - if (H5Pget_fapl_multi(fapl, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0) - TEST_ERROR; - - if (split) { - /* Set memb_addr aligned */ - memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + pagesize - 1) / pagesize) * pagesize; - memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + pagesize - 1) / pagesize) * pagesize; - } - else { - /* Set memb_addr aligned */ - for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt++) - memb_addr[mt] = ((memb_addr[mt] + pagesize - 1) / pagesize) * pagesize; - } /* end else */ - - /* Set multi driver with new FAPLs */ - if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl_arr, (const char *const *)memb_name, memb_addr, relax) < - 0) - TEST_ERROR; - - /* Free memb_name */ - for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt++) - free(memb_name[mt]); - - return 0; - -error: - return (-1); - -} /* set_multi_split() */ -#endif - -/**************************************************************** -** -** test_file_freespace(): -** This routine checks the free space available in a file as -** returned by the public routine H5Fget_freespace(). 
-** -** -*****************************************************************/ -#if 0 -static void -test_file_freespace(const char *env_h5_drvr) -{ - hid_t file; /* File opened with read-write permission */ -#if 0 - h5_stat_size_t empty_filesize; /* Size of file when empty */ - h5_stat_size_t mod_filesize; /* Size of file after being modified */ - hssize_t free_space; /* Amount of free space in file */ -#endif - hid_t fcpl; /* File creation property list */ - hid_t fapl, new_fapl; /* File access property list IDs */ - hid_t dspace; /* Dataspace ID */ - hid_t dset; /* Dataset ID */ - hid_t dcpl; /* Dataset creation property list */ - int k; /* Local index variable */ - unsigned u; /* Local index variable */ - char filename[FILENAME_LEN]; /* Filename to use */ - char name[32]; /* Dataset name */ - unsigned new_format; /* To use old or new format */ - bool split_vfd, multi_vfd; /* Indicate multi/split driver */ - hsize_t expected_freespace; /* Freespace expected */ - hsize_t expected_fs_del; /* Freespace expected after delete */ - herr_t ret; /* Return value */ - - split_vfd = !strcmp(env_h5_drvr, "split"); - multi_vfd = !strcmp(env_h5_drvr, "multi"); - - if (!split_vfd && !multi_vfd) { - fapl = h5_fileaccess(); - h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); - - new_fapl = H5Pcopy(fapl); - CHECK(new_fapl, FAIL, "H5Pcopy"); - - /* Set the "use the latest version of the format" bounds */ - ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Test with old & new format */ - for (new_format = false; new_format <= true; new_format++) { - hid_t my_fapl; - - /* Set the FAPL for the type of format */ - if (new_format) { - MESSAGE(5, ("Testing with new group format\n")); - - my_fapl = new_fapl; - - if (multi_vfd || split_vfd) { - ret = set_multi_split(new_fapl, FSP_SIZE_DEF, split_vfd); - CHECK(ret, FAIL, "set_multi_split"); - } - - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5P_set_file_space_strategy"); - - expected_freespace = 4534; - if (split_vfd) - expected_freespace = 427; - if (multi_vfd) - expected_freespace = 248; - expected_fs_del = 0; - } /* end if */ - else { - MESSAGE(5, ("Testing with old group format\n")); - /* Default: non-paged aggregation, non-persistent free-space */ - my_fapl = fapl; - expected_freespace = 2464; - if (split_vfd) - expected_freespace = 264; - if (multi_vfd) - expected_freespace = 0; - expected_fs_del = 4096; - - } /* end else */ - - /* Create an "empty" file */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl); - CHECK(file, FAIL, "H5Fcreate"); - - ret = H5Fclose(file); - CHECK_I(ret, "H5Fclose"); -#if 0 - /* Get the "empty" file size */ - empty_filesize = h5_get_file_size(filename, H5P_DEFAULT); -#endif - /* Re-open the file (with read-write permission) */ - file = H5Fopen(filename, H5F_ACC_RDWR, my_fapl); - CHECK_I(file, "H5Fopen"); -#if 0 - /* Check that the free space is 0 */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, 0, "H5Fget_freespace"); -#endif - /* Create dataspace for datasets */ - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, 
H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ - - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Check that there is the right amount of free space in the file */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, expected_freespace, "H5Fget_freespace"); -#endif - /* Delete datasets in file */ - for (k = 9; k >= 0; k--) { - snprintf(name, sizeof(name), "Dataset %u", (unsigned)k); - ret = H5Ldelete(file, name, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end for */ -#if 0 - /* Check that there is the right amount of free space in the file */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, expected_fs_del, "H5Fget_freespace"); -#endif - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -#if 0 - /* Get the file size after modifications*/ - mod_filesize = h5_get_file_size(filename, H5P_DEFAULT); - - /* Check that the file reverted to empty size */ - VERIFY(mod_filesize, empty_filesize, "H5Fget_freespace"); - - h5_clean_files(FILESPACE_NAME, my_fapl); -#endif - H5Fdelete(FILESPACE_NAME[0], my_fapl); - } /* end for */ - } - -} /* end test_file_freespace() */ - -/**************************************************************** -** -** test_sects_freespace(): -** This routine checks free-space section information for the -** file as returned by the public routine H5Fget_free_sections(). 
-** -*****************************************************************/ -static void -test_sects_freespace(const char *env_h5_drvr, bool new_format) -{ - char filename[FILENAME_LEN]; /* Filename to use */ - hid_t file; /* File ID */ - hid_t fcpl; /* File creation property list template */ - hid_t fapl; /* File access property list template */ -#if 0 - hssize_t free_space; /* Amount of free-space in the file */ -#endif - hid_t dspace; /* Dataspace ID */ - hid_t dset; /* Dataset ID */ - hid_t dcpl; /* Dataset creation property list */ - char name[32]; /* Dataset name */ - hssize_t nsects = 0; /* # of free-space sections */ - hssize_t nall; /* # of free-space sections for all types of data */ - hssize_t nmeta = 0, nraw = 0; /* # of free-space sections for meta/raw/generic data */ - H5F_sect_info_t sect_info[15]; /* Array to hold free-space information */ - H5F_sect_info_t all_sect_info[15]; /* Array to hold free-space information for all types of data */ - H5F_sect_info_t meta_sect_info[15]; /* Array to hold free-space information for metadata */ - H5F_sect_info_t raw_sect_info[15]; /* Array to hold free-space information for raw data */ - hsize_t total = 0; /* sum of the free-space section sizes */ - hsize_t tmp_tot = 0; /* Sum of the free-space section sizes */ - hsize_t last_size; /* Size of last free-space section */ - hsize_t dims[1]; /* Dimension sizes */ - unsigned u; /* Local index variable */ - H5FD_mem_t type; - bool split_vfd = false, multi_vfd = false; - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Fget_free_sections()--free-space section info in the file\n")); - - split_vfd = !strcmp(env_h5_drvr, "split"); - multi_vfd = !strcmp(env_h5_drvr, "multi"); - - if (!split_vfd && !multi_vfd) { - - fapl = h5_fileaccess(); - h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); - - /* Create file-creation template */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - if (new_format) { - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Set to paged aggregation and persistent free-space */ - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* Set up paged aligned address space for multi/split driver */ - if (multi_vfd || split_vfd) { - ret = set_multi_split(fapl, FSP_SIZE_DEF, split_vfd); - CHECK(ret, FAIL, "set_multi_split"); - } - } - else { - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, true, (hsize_t)1); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - } - - /* Create the file */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create 1 large dataset */ - dims[0] = 1200; - dspace = H5Screate_simple(1, dims, NULL); - dset = H5Dcreate2(file, "Dataset_large", H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - /* Close dataset */ - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for datasets */ - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, 
FAIL, "H5Screate"); - - /* Create datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ - - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Delete odd-numbered datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - if (u % 2) { - ret = H5Ldelete(file, name, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ - } /* end for */ - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file with read-only permission */ - file = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK_I(file, "H5Fopen"); -#if 0 - /* Get the amount of free space in the file */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); -#endif - /* Get the total # of free-space sections in the file */ - nall = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL); - CHECK(nall, FAIL, "H5Fget_free_sections"); - - /* Should return failure when nsects is 0 with a nonnull sect_info */ - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info); - VERIFY(nsects, FAIL, "H5Fget_free_sections"); - - /* Retrieve and verify free space info for all the sections */ - memset(all_sect_info, 0, sizeof(all_sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info); - VERIFY(nsects, nall, "H5Fget_free_sections"); - - /* Verify the amount of free-space is correct */ - for (u = 0; u < nall; u++) - total += all_sect_info[u].size; -#if 0 - VERIFY(free_space, total, "H5Fget_free_sections"); -#endif - /* Save the last section's size */ - last_size = all_sect_info[nall - 1].size; - - /* Retrieve and verify free space info for -1 sections */ - memset(sect_info, 0, sizeof(sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall - 1), sect_info); - VERIFY(nsects, nall, "H5Fget_free_sections"); - - /* Verify the amount of free-space is correct */ - total = 0; - for (u = 0; u < (nall - 1); u++) { - VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); - VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); - total += sect_info[u].size; - } -#if 0 - VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections"); -#endif - /* Retrieve and verify free-space info for +1 sections */ - memset(sect_info, 0, sizeof(sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall + 1), sect_info); - VERIFY(nsects, nall, "H5Fget_free_sections"); - - /* Verify amount of free-space is correct */ - total = 0; - for (u = 0; u < nall; u++) { - VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); - VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); - total += sect_info[u].size; - } - VERIFY(sect_info[nall].addr, 0, "H5Fget_free_sections"); - VERIFY(sect_info[nall].size, 0, "H5Fget_free_sections"); -#if 0 - VERIFY(free_space, total, "H5Fget_free_sections"); -#endif - - memset(meta_sect_info, 0, sizeof(meta_sect_info)); - if (multi_vfd) { - hssize_t ntmp; - - for (type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; type++) { - if (type == H5FD_MEM_DRAW || type == 
H5FD_MEM_GHEAP) - continue; - /* Get the # of free-space sections in the file for metadata */ - ntmp = H5Fget_free_sections(file, type, (size_t)0, NULL); - CHECK(ntmp, FAIL, "H5Fget_free_sections"); - - if (ntmp > 0) { - nsects = H5Fget_free_sections(file, type, (size_t)ntmp, &meta_sect_info[nmeta]); - VERIFY(nsects, ntmp, "H5Fget_free_sections"); - nmeta += ntmp; - } - } - } - else { - /* Get the # of free-space sections in the file for metadata */ - nmeta = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)0, NULL); - CHECK(nmeta, FAIL, "H5Fget_free_sections"); - - /* Retrieve and verify free-space sections for metadata */ - nsects = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)nmeta, meta_sect_info); - VERIFY(nsects, nmeta, "H5Fget_free_sections"); - } - - /* Get the # of free-space sections in the file for raw data */ - nraw = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)0, NULL); - CHECK(nraw, FAIL, "H5Fget_free_sections"); - - /* Retrieve and verify free-space sections for raw data */ - memset(raw_sect_info, 0, sizeof(raw_sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)nraw, raw_sect_info); - VERIFY(nsects, nraw, "H5Fget_free_sections"); - - /* Sum all the free-space sections */ - for (u = 0; u < nmeta; u++) - tmp_tot += meta_sect_info[u].size; - - for (u = 0; u < nraw; u++) - tmp_tot += raw_sect_info[u].size; - - /* Verify free-space info */ - VERIFY(nmeta + nraw, nall, "H5Fget_free_sections"); - VERIFY(tmp_tot, total, "H5Fget_free_sections"); - - /* Closing */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Pclose(fcpl); - CHECK(fcpl, FAIL, "H5Pclose"); -#if 0 - h5_clean_files(FILESPACE_NAME, fapl); -#endif - H5Fdelete(FILESPACE_NAME[0], fapl); - } - -} /* end test_sects_freespace() */ -#endif - -/**************************************************************** -** -** test_filespace_compatible(): -** Verify that the trunk with the latest file space management -** can open, read and modify 1.6 HDF5 file and 1.8 HDF5 file. -** Also verify the correct file space handling information -** and the amount of free space. 
-** -****************************************************************/ -#if 0 -static void -test_filespace_compatible(void) -{ - int fd_old = (-1), fd_new = (-1); /* File descriptors for copying data */ - hid_t fid = -1; /* File id */ - hid_t did = -1; /* Dataset id */ - hid_t fcpl; /* File creation property list template */ - int check[100]; /* Temporary buffer for verifying dataset data */ - int rdbuf[100]; /* Temporary buffer for reading in dataset data */ - uint8_t buf[READ_OLD_BUFSIZE]; /* temporary buffer for reading */ - ssize_t nread; /* Number of bytes read in */ - unsigned i, j; /* Local index variable */ - hssize_t free_space; /* Amount of free-space in the file */ - bool persist; /* Persist free-space or not */ - hsize_t threshold; /* Free-space section threshold */ - H5F_fspace_strategy_t strategy; /* File space handling strategy */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("File space compatibility testing for 1.6 and 1.8 files\n")); - - for (j = 0; j < NELMTS(OLD_FILENAME); j++) { - const char *filename = H5_get_srcdir_filename(OLD_FILENAME[j]); /* Corrected test file name */ - - /* Open and copy the test file into a temporary file */ - fd_old = HDopen(filename, O_RDONLY); - CHECK(fd_old, FAIL, "HDopen"); - fd_new = HDopen(FILE5, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); - CHECK(fd_new, FAIL, "HDopen"); - - /* Copy data */ - while ((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0) { - ssize_t write_err = HDwrite(fd_new, buf, (size_t)nread); - CHECK(write_err, -1, "HDwrite"); - } /* end while */ - - /* Close the files */ - ret = HDclose(fd_old); - CHECK(ret, FAIL, "HDclose"); - ret = HDclose(fd_new); - CHECK(ret, FAIL, "HDclose"); - - /* Open the temporary test file */ - fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* There should not be any free space in the file */ - free_space = H5Fget_freespace(fid); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, (hssize_t)0, "H5Fget_freespace"); - - /* Get the file's file creation property list */ - fcpl = H5Fget_create_plist(fid); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - /* Retrieve the file space info */ - ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - /* File space handling strategy should be H5F_FSPACE_STRATEGY_FSM_AGGR */ - /* Persisting free-space should be false */ - /* Free-space section threshold should be 1 */ - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - - /* Generate raw data */ - for (i = 0; i < 100; i++) - check[i] = (int)i; - - /* Open and read the dataset */ - did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 100; i++) - VERIFY(rdbuf[i], check[i], "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Remove the dataset */ - ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close the plist */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-Open the file */ - fid = H5Fopen(FILE5, 
H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* The dataset should not be there */ - did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); - VERIFY(did, FAIL, "H5Dopen"); - - /* There should not be any free space in the file */ - free_space = H5Fget_freespace(fid); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, (hssize_t)0, "H5Fget_freespace"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ -} /* test_filespace_compatible */ -#endif - -/**************************************************************** -** -** test_filespace_1.10.0_compatible(): -** Verify that the latest file space management can open, read and -** modify 1.10.0 HDF5 files : -** h5fc_ext1_i.h5: H5F_FILE_SPACE_ALL, default threshold; has superblock extension but no fsinfo message -** h5fc_ext1_f.h5: H5F_FILE_SPACE_ALL_PERSIST, default threshold; has superblock extension with fsinfo -*message -** h5fc_ext2_if.h5: H5F_FILE_SPACE_ALL, non-default threshold; has superblock extension with fsinfo -*message -** h5fc_ext2_sf.h5: H5F_FILE_SPACE_VFD, default threshold; has superblock extension with fsinfo message -** h5fc_ext3_isf.h5: H5F_FILE_SPACE_AGGR_VFD, default threshold; has superblock extension with fsinfo -*message -** h5fc_ext_none.h5: H5F_FILE_SPACE_ALL, default threshold; without superblock extension -** The above files are copied from release 1.10.0 tools/h5format_convert/testfiles. -** -****************************************************************/ -#if 0 -static void -test_filespace_1_10_0_compatible(void) -{ - hid_t fid = -1; /* File id */ - hid_t did = -1; /* Dataset id */ - hid_t fcpl; /* File creation property list */ - bool persist; /* Persist free-space or not */ - hsize_t threshold; /* Free-space section threshold */ - H5F_fspace_strategy_t strategy; /* File space handling strategy */ - int wbuf[24]; /* Buffer for dataset data */ - int rdbuf[24]; /* Buffer for dataset data */ - int status; /* Status from copying the existing file */ - unsigned i, j; /* Local index variable */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("File space compatibility testing for 1.10.0 files\n")); - - for (j = 0; j < NELMTS(OLD_1_10_0_FILENAME); j++) { - /* Make a copy of the test file */ - status = h5_make_local_copy(OLD_1_10_0_FILENAME[j], FILE5); - CHECK(status, FAIL, "h5_make_local_copy"); - - /* Open the temporary test file */ - fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Get the file's file creation property list */ - fcpl = H5Fget_create_plist(fid); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - /* Retrieve the file space info */ - ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - - switch (j) { - case 0: -#if 0 - VERIFY(strategy, H5F_FILE_SPACE_STRATEGY_DEF, "H5Pget_file_space_strategy"); - VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); - VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); -#endif - /* Open the dataset */ - did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - for (i = 0; i < 24; i++) - wbuf[i] = (int)j + 1; - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 1: - VERIFY(strategy, 
H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); - VERIFY(persist, true, "H5Pget_file_space_strategy"); -#if 0 - VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); -#endif - - /* Open the dataset */ - did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - for (i = 0; i < 24; i++) - wbuf[i] = (int)j + 1; - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 2: - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); -#if 0 - VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); -#endif - VERIFY(threshold, 2, "H5Pget_file_space_strategy"); - - /* Open the dataset */ - did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - for (i = 0; i < 24; i++) - wbuf[i] = (int)j + 1; - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 3: - VERIFY(strategy, H5F_FSPACE_STRATEGY_NONE, "H5Pget_file_space_strategy"); -#if 0 - VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); - VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); -#endif - /* Open the dataset */ - did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - for (i = 0; i < 24; i++) - wbuf[i] = (int)j + 1; - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 4: - VERIFY(strategy, H5F_FSPACE_STRATEGY_AGGR, "H5Pget_file_space_strategy"); -#if 0 - VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); - VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); -#endif - /* Open the dataset */ - did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - for (i = 0; i < 24; i++) - wbuf[i] = (int)j + 1; - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - case 5: - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); -#if 0 - VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy"); - VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy"); -#endif - /* Open the dataset */ - did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - for (i = 0; i < 24; i++) - wbuf[i] = (int)j + 1; - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - default: - break; - } - - /* Close the plist */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-Open the file */ - fid = H5Fopen(FILE5, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - switch (j) { - case 0: - 
/* Open and read the dataset */ - did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 24; i++) - VERIFY(rdbuf[i], j + 1, "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 1: - /* Open and read the dataset */ - did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 24; i++) - VERIFY(rdbuf[i], j + 1, "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 2: - /* Open and read the dataset */ - did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 24; i++) - VERIFY(rdbuf[i], j + 1, "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 3: - /* Open and read the dataset */ - did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 24; i++) - VERIFY(rdbuf[i], j + 1, "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 4: - - /* Open and read the dataset */ - did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 24; i++) - VERIFY(rdbuf[i], j + 1, "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - case 5: - - /* Open and read the dataset */ - did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen"); - - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read is correct */ - for (i = 0; i < 24; i++) - VERIFY(rdbuf[i], j + 1, "test_compatible"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - break; - - default: - break; - } - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - -} /* test_filespace_1_10_0_compatible */ -#endif - -/**************************************************************** -** -** test_filespace_round_compatible(): -** Verify that the trunk can open, read and modify these files-- -** 1) They are initially created (via gen_filespace.c) in the trunk -** with combinations of file space strategies, default/non-default -** threshold, and file spacing paging enabled/disabled. -** The library creates the file space info message with -** "mark if unknown" in these files. -** 2) They are copied to the 1.8 branch, and are opened/read/modified -** there via test_filespace_compatible() in test/tfile.c. -** The 1.8 library marks the file space info message as "unknown" -** in these files. 
-** 3) They are then copied back from the 1.8 branch to the trunk for -** compatibility testing via this routine. -** 4) Upon encountering the file space info message which is marked -** as "unknown", the library will use the default file space management -** from then on: non-persistent free-space managers, default threshold, -** and non-paging file space. -** -****************************************************************/ -#if 0 -static void -test_filespace_round_compatible(void) -{ - hid_t fid = -1; /* File id */ - hid_t fcpl = -1; /* File creation property list ID */ - unsigned j; /* Local index variable */ - H5F_fspace_strategy_t strategy; /* File space strategy */ - bool persist; /* Persist free-space or not */ - hsize_t threshold; /* Free-space section threshold */ - hssize_t free_space; /* Amount of free space in the file */ - int status; /* Status from copying the existing file */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("File space compatibility testing for files from trunk to 1_8 to trunk\n")); - - for (j = 0; j < NELMTS(FSPACE_FILENAMES); j++) { - /* Make a copy of the test file */ - status = h5_make_local_copy(FSPACE_FILENAMES[j], FILE5); - CHECK(status, FAIL, "h5_make_local_copy"); - - /* Open the temporary test file */ - fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Get the file's creation property list */ - fcpl = H5Fget_create_plist(fid); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy"); - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - - /* There should not be any free space in the file */ - free_space = H5Fget_freespace(fid); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, (hssize_t)0, "H5Fget_freespace"); - - /* Closing */ - ret = H5Fclose(fid); - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - -} /* test_filespace_round_compatible */ - -/**************************************************************** -** -** test_libver_bounds_real(): -** Verify that a file created and modified with the -** specified libver bounds has the specified object header -** versions for the right objects. -** -****************************************************************/ -static void -test_libver_bounds_real(H5F_libver_t libver_create, unsigned oh_vers_create, H5F_libver_t libver_mod, - unsigned oh_vers_mod) -{ - hid_t file, group; /* Handles */ - hid_t fapl; /* File access property list */ - H5O_native_info_t ninfo; /* Object info */ - herr_t ret; /* Return value */ - - /* - * Create a new file using the creation properties. 
- */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - ret = H5Pset_libver_bounds(fapl, libver_create, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - file = H5Fcreate("tfile5.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* - * Make sure the root group has the correct object header version - */ - ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name"); - - /* - * Reopen the file and make sure the root group still has the correct version - */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Pset_libver_bounds(fapl, libver_mod, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - file = H5Fopen("tfile5.h5", H5F_ACC_RDWR, fapl); - CHECK(file, FAIL, "H5Fopen"); - - ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name"); - - /* - * Create a group named "G1" in the file, and make sure it has the correct - * object header version - */ - group = H5Gcreate2(file, "/G1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, FAIL, "H5Gcreate"); - - //! [H5Oget_native_info_snip] - - ret = H5Oget_native_info(group, &ninfo, H5O_NATIVE_INFO_HDR); - - //! [H5Oget_native_info_snip] - - CHECK(ret, FAIL, "H5Oget_native_info"); - VERIFY(ninfo.hdr.version, oh_vers_mod, "H5Oget_native_info"); - - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* - * Create a group named "/G1/G3" in the file, and make sure it has the - * correct object header version - */ - group = H5Gcreate2(file, "/G1/G3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, FAIL, "H5Gcreate"); - - ret = H5Oget_native_info(group, &ninfo, H5O_NATIVE_INFO_HDR); - CHECK(ret, FAIL, "H5Oget_native_info"); - VERIFY(ninfo.hdr.version, oh_vers_mod, "H5Oget_native_info"); - - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - //! [H5Oget_native_info_by_name_snip] - - /* - * Make sure the root group still has the correct object header version - */ - ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - - //! [H5Oget_native_info_by_name_snip] - - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); -} /* end test_libver_bounds_real() */ -#endif - -/*------------------------------------------------------------------------- - * Function: test_libver_bounds_open - * - * Purpose: Tests opening latest file with various low/high bounds. 
- * - * Return: Success: 0 - * Failure: number of errors - * - *------------------------------------------------------------------------- - */ -#if 0 -#define VERBFNAME "tverbounds_dspace.h5" -#define VERBDSNAME "dataset 1" -#define SPACE1_DIM1 3 -static void -test_libver_bounds_open(void) -{ - hid_t file = -1; /* File ID */ - hid_t space = -1; /* Dataspace ID */ - hid_t dset = -1; /* Dataset ID */ - hid_t fapl = -1; /* File access property list ID */ - hid_t new_fapl = -1; /* File access property list ID for reopened file */ - hid_t dcpl = -1; /* Dataset creation property list ID */ - hsize_t dim[1] = {SPACE1_DIM1}; /* Dataset dimensions */ - H5F_libver_t low, high; /* File format bounds */ - hsize_t chunk_dim[1] = {SPACE1_DIM1}; /* Chunk dimensions */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Opening File in Various Version Bounds\n")); - - /* Create a file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Create dataspace */ - space = H5Screate_simple(1, dim, NULL); - CHECK(space, FAIL, "H5Screate_simple"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Create and set chunk plist */ - ret = H5Pset_chunk(dcpl, 1, chunk_dim); - CHECK(ret, FAIL, "H5Pset_chunk"); - ret = H5Pset_deflate(dcpl, 9); - CHECK(ret, FAIL, "H5Pset_deflate"); - ret = H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); - CHECK(ret, FAIL, "H5Pset_chunk_opts"); - - /* Create a file with (LATEST, LATEST) bounds, create a layout version 4 - dataset, then close the file */ - - /* Set version bounds to (LATEST, LATEST) */ - low = H5F_LIBVER_LATEST; - high = H5F_LIBVER_LATEST; - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the file */ - file = H5Fcreate(VERBFNAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create dataset */ - dset = H5Dcreate2(file, VERBDSNAME, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - /* Close dataset and file */ - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Attempt to open latest file with (earliest, v18), should fail */ - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18); - H5E_BEGIN_TRY - { - file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl); - } - H5E_END_TRY - VERIFY(file, FAIL, "Attempted to open latest file with earliest version"); - - /* Attempt to open latest file with (v18, v18), should fail */ - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18); - H5E_BEGIN_TRY - { - file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl); - } - H5E_END_TRY - VERIFY(file, FAIL, "Attempted to open latest file with v18 bounds"); - - /* Opening VERBFNAME in these combination should succeed. 
- For each low bound, verify that it is upgraded properly */ - high = H5F_LIBVER_LATEST; - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - H5F_libver_t new_low = H5F_LIBVER_EARLIEST; - - /* Set version bounds for opening file */ - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Open the file */ - file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl); - CHECK(file, FAIL, "H5Fopen"); - - /* Get the new file access property */ - new_fapl = H5Fget_access_plist(file); - CHECK(new_fapl, FAIL, "H5Fget_access_plist"); - - /* Get new low bound and verify that it has been upgraded properly */ - ret = H5Pget_libver_bounds(new_fapl, &new_low, NULL); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - VERIFY(new_low >= H5F_LIBVER_V110, true, "Low bound should be upgraded to at least H5F_LIBVER_V110"); - - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - } /* for low */ - - /* Close dataspace and property lists */ - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); -} /* end test_libver_bounds_open() */ -#endif - -/*------------------------------------------------------------------------- - * Function: test_libver_bounds_copy - * - * Purpose: Test to verify HDFFV-10800 is fixed: - * This test is copied from the user test program: copy10.c. - * (See attached programs in the jira issue.) - * - * The source file used in the test is generated by the user test - * program "fill18.c" with the 1.8 library. The file is created - * with the latest format and the dataset created in the file - * has version 3 fill value message (latest). - * - * The test creates the destination file with (v18, v18) version bounds. - * H5Ocopy() should succeed in copying the dataset in the source file - * to the destination file. 
- * - * Return: Success: 0 - * Failure: number of errors - * - *------------------------------------------------------------------------- - */ -#if 0 -static void -test_libver_bounds_copy(void) -{ - hid_t src_fid = -1; /* File ID */ - hid_t dst_fid = -1; /* File ID */ - hid_t fapl = -1; /* File access property list ID */ - const char *src_fname; /* Source file name */ - herr_t ret; /* Generic return value */ - bool driver_is_default_compatible; - - /* Output message about the test being performed */ - MESSAGE(5, ("Testing H5Ocopy a dataset in a 1.8 library file to a 1.10 library file\n")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK_I(ret, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - /* Get the test file name */ - src_fname = H5_get_srcdir_filename(SRC_FILE); - - /* Open the source test file */ - src_fid = H5Fopen(src_fname, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(src_fid, FAIL, "H5Fopen"); - - /* Create file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set library version bounds to (v18, v18) */ - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the destination file with the fapl */ - dst_fid = H5Fcreate(DST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(dst_fid, FAIL, "H5Pcreate"); - - /* Close the fapl */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Copy the dataset in the source file to the destination file */ - ret = H5Ocopy(src_fid, DSET_DS1, dst_fid, DSET_DS1, H5P_DEFAULT, H5P_DEFAULT); - VERIFY(ret, SUCCEED, "H5Ocopy"); - - /* Close the source file */ - ret = H5Fclose(src_fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close the destination file */ - ret = H5Fclose(dst_fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Remove the destination file */ - H5Fdelete(DST_FILE, H5P_DEFAULT); - -} /* end test_libver_bounds_copy() */ -#endif - -/**************************************************************** -** -** test_libver_bounds(): -** Verify that a file created and modified with various -** libver bounds is handled correctly. 
(Further testing -** welcome) -** -****************************************************************/ -#if 0 -static void -test_libver_bounds(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing setting library version bounds\n")); - - /* Run the tests */ - test_libver_bounds_real(H5F_LIBVER_EARLIEST, 1, H5F_LIBVER_LATEST, 2); - test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 2); - test_libver_bounds_open(); -#if 0 - test_libver_bounds_copy(); -#endif -} /* end test_libver_bounds() */ -#endif - -/************************************************************************************** -** -** test_libver_bounds_low_high(): -** Tests to verify that format versions are correct with the following five -** pairs of low/high version bounds set in fapl via H5Pset_libver_bounds(): -** (1) (earliest, v18) -** (2) (earliest, v110) -** (3) (v18, v18) -** (4) (v18, v110) -** (5) (v110, v110) -** -** For each pair of settings in fapl, verify format versions with the following -** six tests: -** (1) test_libver_bounds_super(fapl): superblock versions -** (2) test_libver_bounds_obj(fapl): object header versions -** (3) test_libver_bounds_dataset(fapl): message versions associated with dataset -** (4) test_libver_bounds_dataspace(fapl): dataspace message versions -** (5) test_libver_bounds_datatype(fapl): datatype message versions -** (6) test_libver_bounds_attributes(fapl): attribute message versions -** -**************************************************************************************/ -#if 0 -static void -test_libver_bounds_low_high(const char *env_h5_drvr) -{ - hid_t fapl = H5I_INVALID_HID; /* File access property list */ - H5F_libver_t low, high; /* Low and high bounds */ - herr_t ret; /* The return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing setting (low, high) format version bounds\n")); - - /* Create a file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); - - /* Loop through all the combinations of low/high version bounds */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - - H5E_BEGIN_TRY - { - /* Set the low/high version bounds */ - ret = H5Pset_libver_bounds(fapl, low, high); - } - H5E_END_TRY - - /* Should fail: invalid combinations */ - if (high == H5F_LIBVER_EARLIEST) { - VERIFY(ret, FAIL, "H5Pset_libver_bounds"); - continue; - } - - /* Should fail: invalid combinations */ - if (high < low) { - VERIFY(ret, FAIL, "H5Pset_libver_bounds"); - continue; - } - - /* All other combinations are valid and should succeed */ - VERIFY(ret, SUCCEED, "H5Pset_libver_bounds"); - - /* Tests to verify version bounds */ - test_libver_bounds_super(fapl, env_h5_drvr); - test_libver_bounds_obj(fapl); - test_libver_bounds_dataset(fapl); - test_libver_bounds_dataspace(fapl); - test_libver_bounds_datatype(fapl); - test_libver_bounds_attributes(fapl); - } - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_libver_bounds_low_high() */ -#endif - -/*********************************************************************** -** -** test_libver_bounds_super(): -** Verify superblock version with the following two tests: -** (1) test_libver_bounds_super_create(): -** --when creating a file with the input fapl and the fcpl -** that has the following feature enabled: -** (A) default fcpl -** (B) fcpl with v1-btree K value enabled -** (C) fcpl with shared messages enabled -** 
(D) fcpl with persistent free-space manager enabled -** -** (2) test_libver_bounds_super_open(): -** --when opening a file which is created with the input fapl -** and the fcpl setting as #A to #D above. -** -** These two tests are run with or without SWMR file access. -** -*************************************************************************/ -#if 0 -static void -test_libver_bounds_super(hid_t fapl, const char *env_h5_drvr) -{ - hid_t fcpl = H5I_INVALID_HID; /* File creation property list */ - herr_t ret; /* The return value */ - - /* Create a default fcpl: #A */ - /* This will result in superblock version 0 */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); - - /* Verify superblock version when creating a file with input fapl, - fcpl #A and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_create(fapl, fcpl, true, false); - test_libver_bounds_super_create(fapl, fcpl, false, false); - - /* Verify superblock version when opening a file which is created - with input fapl, fcpl #A and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_open(fapl, fcpl, true, false); - test_libver_bounds_super_open(fapl, fcpl, false, false); - - /* Close the fcpl */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create a fcpl with v1-btree K value enabled: #B */ - /* This will result in superblock version 1 */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_istore_k(fcpl, 64); - CHECK(ret, FAIL, "H5Pset_istore_k"); - - /* Verify superblock version when creating a file with input fapl, - fcpl #B and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_create(fapl, fcpl, true, false); - test_libver_bounds_super_create(fapl, fcpl, false, false); - - /* Verify superblock version when opening a file which is created - with input fapl, fcpl #B and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_open(fapl, fcpl, true, false); - test_libver_bounds_super_open(fapl, fcpl, false, false); - - /* Close the fcpl */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create a fcpl with shared messages enabled: #C */ - /* This will result in superblock version 2 */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_shared_mesg_nindexes(fcpl, 1); - CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 2); - CHECK(ret, FAIL, "H5Pset_shared_mesg_index"); - - /* Verify superblock version when creating a file with input fapl, - fcpl #C and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_create(fapl, fcpl, true, false); - test_libver_bounds_super_create(fapl, fcpl, false, false); - - /* Verify superblock version when opening a file which is created - with input fapl, fcpl #C and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_open(fapl, fcpl, true, false); - test_libver_bounds_super_open(fapl, fcpl, false, false); - - /* Close the fcpl */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - if (h5_using_default_driver(env_h5_drvr)) { - /* Create a fcpl with persistent free-space manager enabled: #D */ - /* This will result in superblock version 2 */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, 
H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 1, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space"); - - /* Verify superblock version when creating a file with input fapl, - fcpl #D and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_create(fapl, fcpl, true, true); - test_libver_bounds_super_create(fapl, fcpl, false, true); - - /* Verify superblock version when opening a file which is created - with input fapl, fcpl #D and with/without SWMR access */ - if (H5FD__supports_swmr_test(env_h5_drvr)) - test_libver_bounds_super_open(fapl, fcpl, true, true); - test_libver_bounds_super_open(fapl, fcpl, false, true); - - /* Close the fcpl */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - } - -} /* end test_libver_bounds_super() */ - -/************************************************************************************************** -** -** test_libver_bounds_super_create(): -** Verify the following when the file is created with the input fapl, fcpl, -** and with/without SWMR access: -** (a) the superblock version # -** (b) the file's low bound setting -** (c) fail or succeed in creating the file -** -** For file creation, the bounds setting in fapl, the feature enabled in fcpl, -** and with/without SWMR file access will determine the results for #a to #c. -** -** The first row for the following two tables is the 5 pairs of low/high bounds setting -** in the input fapl. The next three rows list the expected results for #a to #c. -** "-->" indicates "upgrade to" -** -** The last table lists the expected results in creating the file when non-default -** free-space info (fsinfo) is enabled in fcpl. -** -** Creating a file with write access -** -------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |______________________________________________________________________________| -** Superblock version | vers 0, 1, 2 | vers 0, 1, 2 | vers 2 | vers 2 | vers 3 | -** |------------------------------------------------------------------------------| -** File's low bound | no change | -** |------------------------------------------------------------------------------| -** File creation | succeed | -** |______________________________________________________________________________| -** -** Creating a file with SWMR-write access -** -------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |______________________________________________________________________________| -** Superblock version | -- | vers 3 | -- | vers 3 | vers 3 | -** |------------------------------------------------------------------------------| -** File's low bound | -- | ->v110 | -- | ->v110 | no change | -** |------------------------------------------------------------------------------| -** File creation | fail | succeed | fail | succeed | succeed | -** |______________________________________________________________________________| -** -** Creating a file with write/SWMR-write access + non-default fsinfo -** -------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |______________________________________________________________________________| -** File creation | fail | succeed | fail | 
succeed | succeed | -** |______________________________________________________________________________| -** -******************************************************************************************************/ -static void -test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ -#if 0 - H5F_t *f = NULL; /* Internal file pointer */ -#endif - H5F_libver_t low, high; /* Low and high bounds */ -#if 0 - bool ok; /* The result is ok or not */ -#endif - herr_t ret; /* The return value */ - - /* Try to create the file */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC | (is_swmr ? H5F_ACC_SWMR_WRITE : 0), fcpl, fapl); - } - H5E_END_TRY - -#if 0 - /* Get the internal file pointer if the create succeeds */ - if (fid >= 0) { - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - } -#endif - /* Retrieve the low/high bounds */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - if (non_def_fsm && high < H5F_LIBVER_V110) - VERIFY(fid, H5I_INVALID_HID, "H5Fcreate"); - - else if (is_swmr) { /* SWMR is enabled */ - if (high >= H5F_LIBVER_V110) { /* Should succeed */ - VERIFY(fid >= 0, true, "H5Fcreate"); -#if 0 - VERIFY(HDF5_SUPERBLOCK_VERSION_3, f->shared->sblock->super_vers, "HDF5_superblock_ver_bounds"); - VERIFY(f->shared->low_bound >= H5F_LIBVER_V110, true, "HDF5_superblock_ver_bounds"); -#endif - } - else /* Should fail */ - VERIFY(fid >= 0, false, "H5Fcreate"); - } - else { /* Should succeed */ - VERIFY(fid >= 0, true, "H5Fcreate"); -#if 0 - VERIFY(low, f->shared->low_bound, "HDF5_superblock_ver_bounds"); - - switch (low) { - case H5F_LIBVER_EARLIEST: - ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_DEF || - f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1 || - f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_2); - VERIFY(ok, true, "HDF5_superblock_ver_bounds"); - break; - - case H5F_LIBVER_V18: - ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_2); - VERIFY(ok, true, "HDF5_superblock_ver_bounds"); - break; - - case H5F_LIBVER_V110: - case H5F_LIBVER_V112: - case H5F_LIBVER_V114: - case H5F_LIBVER_V116: - ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_3); - VERIFY(ok, true, "HDF5_superblock_ver_bounds"); - break; - - case H5F_LIBVER_ERROR: - case H5F_LIBVER_NBOUNDS: - default: - ERROR("H5Pget_libver_bounds"); - - } /* end switch */ -#endif - } /* end else */ - - if (fid >= 0) { /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - -} /* end test_libver_bounds_super_create() */ - -/************************************************************************************************** -** -** test_libver_bounds_super_open(): -** Verify the following when opening a file which is created with the input fapl, fcpl, -** and with/without SWMR access: -** (a) the file's low bound setting -** (b) fail or succeed in opening the file -** -** (1) Create a file with the input fapl, fcpl and with/without SWMR access -** (2) Close the file -** (3) Reopen the file with a new fapl that is set to the 5 pairs of low/high bounds -** in a for loop. For each pair of setting in the new fapl: -** --Verify the expected results for #a and #b above. -** --Close the file. -** -** For file open, the file's superblock version, the low/high bounds setting in fapl, -** and with/without SWMR file access will determine the results for #a and #b. 
-** -** The first row for the following tables (#A - #B) is the 5 pairs of low/high bounds setting -** in the input fapl. The next two rows list the expected results for #a and #b. -** "-->" indicates "upgrade to" -** -** The last table (#C) lists the expected results in opening the file when non-default -** free-space info (fsinfo) is enabled in fcpl. -** -** (A) Opening a file with write access -** -** Superblock version 0, 1 -** -------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |______________________________________________________________________________| -** File's low bound | no change | -** |------------------------------------------------------------------------------| -** File open | succeed | -** |______________________________________________________________________________| -** -** -** Superblock version 2 -** -------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |______________________________________________________________________________| -** File's low bound | -->v18 | no change | -** |------------------------------------------------------------------------------| -** File open | succeed | -** |______________________________________________________________________________| -** -** Superblock version 3 -** -------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |______________________________________________________________________________| -** File's low bound | -- | -->v110 | -- | -->v110 | no change | -** |------------------------------------------------------------------------------| -** File open | fail | succeed | fail | succeed | succeed | -** |______________________________________________________________________________| -** -** -** -** (B) Opening a file with SWMR-write access -** -** Superblock version 0, 1, 2 -** ------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |_____________________________________________________________________________| -** File's low bound | ---- -** |-----------------------------------------------------------------------------| -** File open | fail -** |_____________________________________________________________________________| -** -** -** Superblock version 3 -** ------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |_____________________________________________________________________________| -** File's low bound | -- | -->v110 | -- | -->v110 | no change | -** |-----------------------------------------------------------------------------| -** File open | fail | succeed | fail | succeed | succeed | -** |_____________________________________________________________________________| -** -** -** (C) Opening a file with write/SWMR-write access + non-default fsinfo -** ------------------------------------------------------------------------------- -** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) | -** |_____________________________________________________________________________| -** File open | fail | succeed | fail | succeed | 
succeed | -** |_____________________________________________________________________________| -** -** -******************************************************************************************************/ -static void -test_libver_bounds_super_open(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ -#if 0 - H5F_t *f = NULL; /* Internal file pointer */ -#endif - hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ -#if 0 - unsigned super_vers; /* Superblock version */ -#endif - H5F_libver_t low, high; /* Low and high bounds */ - herr_t ret; /* Return value */ - - /* Create the file with the input fcpl and fapl */ - H5E_BEGIN_TRY - { - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - - /* Retrieve the low/high bounds */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - if (non_def_fsm && high < H5F_LIBVER_V110) { - VERIFY(fid, H5I_INVALID_HID, "H5Fcreate"); - } - else { - VERIFY(fid >= 0, true, "H5Fcreate"); -#if 0 - /* Get the internal file pointer */ - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - - /* The file's superblock version */ - super_vers = f->shared->sblock->super_vers; -#endif - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a default file access property list */ - new_fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(new_fapl, FAIL, "H5Pcreate"); - - /* Loop through all the combinations of low/high bounds in new_fapl */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - H5E_BEGIN_TRY - { - ret = H5Pset_libver_bounds(new_fapl, low, high); - } - H5E_END_TRY - - /* Invalid combinations */ - if (ret < 0) - continue; - - /* Open the file with or without SWMR access */ - H5E_BEGIN_TRY - { - fid = H5Fopen(FILE8, H5F_ACC_RDWR | (is_swmr ? 
H5F_ACC_SWMR_WRITE : 0), new_fapl); - } - H5E_END_TRY - - if (non_def_fsm && high < H5F_LIBVER_V110) { - VERIFY(fid, H5I_INVALID_HID, "H5Fopen"); - continue; - } -#if 0 - /* Get the internal file pointer if the open succeeds */ - if (fid >= 0) { - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - } - - /* Verify the file open succeeds or fails */ - switch (super_vers) { - case 3: - if (high >= H5F_LIBVER_V110) { - /* Should succeed */ - VERIFY(fid >= 0, true, "H5Fopen"); - VERIFY(f->shared->low_bound >= H5F_LIBVER_V110, true, - "HDF5_superblock_ver_bounds"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - else /* Should fail */ - VERIFY(fid >= 0, false, "H5Fopen"); - break; - - case 2: - if (is_swmr) /* Should fail */ - VERIFY(fid >= 0, false, "H5Fopen"); - else { /* Should succeed */ - VERIFY(fid >= 0, true, "H5Fopen"); - VERIFY(f->shared->low_bound >= H5F_LIBVER_V18, true, - "HDF5_superblock_ver_bounds"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - break; - - case 1: - case 0: - if (is_swmr) /* Should fail */ - VERIFY(fid >= 0, false, "H5Fopen"); - else { /* Should succeed */ - VERIFY(fid >= 0, true, "H5Fopen"); - VERIFY(f->shared->low_bound, low, "HDF5_superblock_ver_bounds"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - break; - - default: - break; - } /* end switch */ -#endif - } /* end for */ - } /* end for */ - - /* Close the file access property list */ - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - } /* end else */ - -} /* end test_libver_bounds_super_open() */ -#endif - -/**************************************************************** -** -** test_libver_bounds_obj(): -** Verify object header versions: -** -** (a) Create a file with: -** --the input fapl -** --a fcpl that has shared message enabled -** Verify the root group's object header version. -** Close the file. -** -** (b) Create another file with: -** --the input fapl -** --a default fcpl -** Verify the root group's object header version. -** Close the file. -** -** (c) Reopen the same file in (b) with a new fapl. -** The new fapl is set to the 5 pairs of low/high -** bounds in a "for" loop. 
For each setting in fapl: -** --Create a group in the file -** --Verify the group's object header version -** --Close and delete the group -** --Close the file -** -****************************************************************/ -#if 0 -static void -test_libver_bounds_obj(hid_t fapl) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t gid = H5I_INVALID_HID; /* Group ID */ - hid_t fcpl = H5I_INVALID_HID; /* File creation property list */ - hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ - H5F_t *f = NULL; /* Internal file pointer */ - H5F_libver_t low, high; /* Low and high bounds */ - H5O_native_info_t ninfo; /* Object info */ - H5G_info_t ginfo; /* Group info */ - herr_t ret; /* Return value */ - - /* Retrieve the low/high bounds from the input fapl */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - /* Create a default file creation property list */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); - - /* Enable shared message in the fcpl */ - /* This will result in a version 2 object header */ - ret = H5Pset_shared_mesg_nindexes(fcpl, 1); - CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 2); - CHECK(ret, FAIL, "H5Pset_shared_mesg_index"); - - /* Create the file with the fcpl and the input fapl */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Get root group's object info */ - ret = H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - - /* Verify object header version is 2 because shared message is enabled */ - VERIFY(ninfo.hdr.version, H5O_VERSION_2, "H5O_obj_ver_bounds"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close the file creation property list */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create a file with the default fcpl and input fapl */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Get root group's object info */ - ret = H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - - /* Verify object header version is as indicated by low_bound */ - VERIFY(ninfo.hdr.version, H5O_obj_ver_bounds[low], "H5O_obj_ver_bounds"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a new default file access property list which - is used to open the file in the "for" loop */ - new_fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate"); - - /* Loop through all the combinations of low/high bounds in new_fapl */ - /* Open the file with the fapl; create a group and verify the - object header version, then delete the group and close the file.*/ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - H5E_BEGIN_TRY - { - ret = H5Pset_libver_bounds(new_fapl, low, high); - } - H5E_END_TRY - - if (ret < 0) /* Invalid combinations */ - continue; - - /* Open the file */ - H5E_BEGIN_TRY - { - fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); - } - H5E_END_TRY - - if (fid >= 0) { /* The file open succeeds */ - - /* Get the internal file pointer */ - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - - /* Create a group in the file */ - gid 
= H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Get group information */ - ret = H5Gget_info(gid, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - - /* Verify group storage type */ - if (f->shared->low_bound >= H5F_LIBVER_V18) - /* Links in group are stored in object header */ - VERIFY(ginfo.storage_type, H5G_STORAGE_TYPE_COMPACT, "H5Gget_info"); - else - /* Links in group are stored with a "symbol table" */ - VERIFY(ginfo.storage_type, H5G_STORAGE_TYPE_SYMBOL_TABLE, "H5Gget_info"); - - /* Get object header information */ - ret = H5Oget_native_info_by_name(gid, GRP_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - - /* Verify object header version as indicated by low_bound */ - VERIFY(ninfo.hdr.version, H5O_obj_ver_bounds[f->shared->low_bound], "H5O_obj_ver_bounds"); - - /* Close the group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Delete the group */ - ret = H5Ldelete(fid, GRP_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - } /* end if */ - } /* end for */ - } /* end for */ - - /* Close the file access property list */ - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_libver_bounds_obj() */ - -/**************************************************************** -** -** test_libver_bounds_dataset(): -** Verify message versions associated with datasets: -** -** (a) Create a file with default fcpl and the input fapl. -** Create the following two datasets: -** --A contiguous dataset -** --A chunked dataset with "no filter edge chunks" -** For both datasets, verify the versions for the layout, -** fill value and filter pipeline messages. -** Close the file. -** -** (b) Create a new fapl that is set to the 5 pairs of low/high -** bounds in a "for" loop. 
For each pair of setting in the -** new fapl: -** --Open the same file in (a) with the fapl -** --Create a chunked dataset with 2 unlimited -** dimensions -** --Verify the versions for the layout, fill value -** and filter pipeline messages -** --Close and delete the dataset -** --Close the file -** -****************************************************************/ -static void -test_libver_bounds_dataset(hid_t fapl) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ - hid_t did = H5I_INVALID_HID; /* Dataset ID */ - hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ - hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ - H5D_t *dset = NULL; /* Internal dataset pointer */ - H5F_t *f = NULL; /* Internal file pointer */ - H5F_libver_t low, high; /* Low and high bounds */ - herr_t ret; /* Return value */ - hsize_t fix_dims2[2] = {10, 4}; /* Dimension sizes */ - hsize_t fix_chunks2[2] = {4, 3}; /* Chunk dimension sizes */ - hsize_t dims2[2] = {1, 4}; /* Dimension sizes */ - hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */ - hsize_t chunks2[2] = {4, 5}; /* Chunk dimension sizes */ - - /* Retrieve the low/high bounds from the input fapl */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - /* Create the file with the input fapl */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create the dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* Create a contiguous dataset */ - did = H5Dcreate2(fid, DSETA, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate"); - - /* Get the internal dataset pointer */ - dset = (H5D_t *)H5VL_object(did); - CHECK_PTR(dset, "H5VL_object"); - - /* Verify version for layout and fill value messages */ - if (low == H5F_LIBVER_EARLIEST) { - /* For layout message: the earliest version the library will set is 3 */ - /* For fill value message: the earliest version the library will set is 2 */ - VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT, "H5O_layout_ver_bounds"); - VERIFY(dset->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2, "H5O_fill_ver_bounds"); - } - else { - VERIFY(dset->shared->layout.version, H5O_layout_ver_bounds[low], "H5O_layout_ver_bounds"); - VERIFY(dset->shared->dcpl_cache.fill.version, H5O_fill_ver_bounds[low], "H5O_fill_ver_bounds"); - } - - /* Verify filter pipeline message version */ - VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[low], "H5O_pline_ver_bounds"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Set up dataspace and dcpl for creating a chunked dataset - with "no filter edge chunks" enabled. 
- This will result in a version 4 layout message */ - sid = H5Screate_simple(2, fix_dims2, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 2, fix_chunks2); - CHECK(ret, FAIL, "H5Pset_chunk"); - ret = H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); - CHECK(ret, FAIL, "H5Pset_chunk_opts"); - - /* Create the chunked dataset */ - H5E_BEGIN_TRY - { - did = H5Dcreate2(fid, DSETB, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - } - H5E_END_TRY - - if (did >= 0) { - - /* Get the internal dataset pointer */ - dset = (H5D_t *)H5VL_object(did); - CHECK_PTR(dset, "H5VL_object"); - - /* Verify layout message version and chunk indexing type */ - VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_4, "H5O_layout_ver_bounds"); - VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_FARRAY, "chunk_index_type"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - } - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create a default file access property list which is used - to open the file in the 'for' loop */ - new_fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate"); - - /* Set up dataspace and dcpl for creating a chunked dataset with - 2 unlimited dimensions in the 'for' loop */ - sid = H5Screate_simple(2, dims2, max_dims2); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 2, chunks2); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Loop through all the combinations of low/high bounds in new_fapl */ - /* Open the file with the fapl and create the chunked dataset */ - /* Verify the dataset's layout, fill value and filter pipeline message versions */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - H5E_BEGIN_TRY - { - ret = H5Pset_libver_bounds(new_fapl, low, high); - } - H5E_END_TRY - - if (ret < 0) /* Invalid low/high combinations */ - continue; - - /* Open the file */ - H5E_BEGIN_TRY - { - fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); - } - H5E_END_TRY - - if (fid >= 0) { /* The file open succeeds */ - - /* Get the internal file pointer */ - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - - /* Create the chunked dataset */ - did = H5Dcreate2(fid, DSETC, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - - /* Get the internal file pointer */ - dset = (H5D_t *)H5VL_object(did); - CHECK_PTR(dset, "H5VL_object"); - - if (dset) { - /* Verify the dataset's layout, fill value and filter pipeline message versions */ - /* Also verify the chunk indexing type */ - if (f->shared->low_bound == H5F_LIBVER_EARLIEST) { - /* For layout message: the earliest version the library will set is 3 */ - /* For fill value message: the earliest version the library will set is 2 */ - VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT, - "H5O_layout_ver_bounds"); - VERIFY(dset->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2, - "H5O_fill_ver_bounds"); - } - else { - VERIFY(dset->shared->layout.version, 
H5O_layout_ver_bounds[f->shared->low_bound], - "H5O_layout_ver_bounds"); - VERIFY(dset->shared->dcpl_cache.fill.version, - H5O_fill_ver_bounds[f->shared->low_bound], "H5O_fill_ver_bounds"); - } - - /* Verify the filter pipeline message version */ - VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[f->shared->low_bound], - "H5O_pline_ver_bounds"); - - /* Verify the dataset's chunk indexing type */ - if (dset->shared->layout.version == H5O_LAYOUT_VERSION_LATEST) - VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_BT2, "chunk_index_type"); - else - VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_BTREE, - "chunk_index_type"); - } - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Delete the dataset */ - ret = H5Ldelete(fid, DSETC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - } /* end if */ - } /* end for */ - } /* end for */ - - /* Close the file access property list */ - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_libver_bounds_dataset() */ - -/**************************************************************** -** -** test_libver_bounds_dataspace(): -** Verify dataspace message versions: -** -** (a) Create a file with default fcpl and the input fapl. -** Create the following two datasets: -** --A dataset with scalar dataspace -** --A dataset with null dataspace -** For both datasets, verify the dataspace message versions. -** Close the file. -** -** (b) Create a new fapl that is set to the 5 pairs of low/high -** bounds in a "for" loop. 
For each pair of setting in the -** new fapl: -** --Open the same file in (a) with the fapl -** --Create a chunked dataset, a compact dataset and -** a contiguous dataset -** --Verify the dataspace message version for these -** three datasets -** --Delete the three datasets and the dataspaces -** --Close the file -** -****************************************************************/ -static void -test_libver_bounds_dataspace(hid_t fapl) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ - hid_t did = H5I_INVALID_HID, did_null = H5I_INVALID_HID; /* Dataset IDs */ - hid_t did_compact = H5I_INVALID_HID, did_contig = H5I_INVALID_HID; /* Dataset IDs */ - hid_t sid = H5I_INVALID_HID, sid_null = H5I_INVALID_HID; /* Dataspace IDs */ - hid_t sid_compact = H5I_INVALID_HID, sid_contig = H5I_INVALID_HID; /* Dataspace IDs */ - hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ - hid_t dcpl_compact = H5I_INVALID_HID, dcpl_contig = H5I_INVALID_HID; /* Dataset creation property lists */ - H5S_t *space = NULL, *space_null = NULL; /* Internal dataspace pointers */ - H5F_t *f = NULL; /* Internal file pointer */ - H5F_libver_t low, high; /* Low and high bounds */ - hsize_t dims[1] = {1}; /* Dimension sizes */ - hsize_t dims2[2] = {5, 4}; /* Dimension sizes */ - hsize_t max_dims[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes */ - hsize_t chunks[1] = {4}; /* Chunk dimension sizes */ - herr_t ret; /* Return value */ - - /* Retrieve the low/high bounds from the input fapl */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - /* Create the file with the input fapl */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create scalar dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* Create a dataset with the scalar dataspace */ - did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate"); - - /* Get the internal dataspace pointer */ - sid = H5Dget_space(did); - CHECK(sid, H5I_INVALID_HID, "H5Dget_space"); - space = (H5S_t *)H5I_object(sid); - CHECK_PTR(space, "H5I_object"); - - /* Verify the dataspace version */ - VERIFY(space->extent.version, H5O_sdspace_ver_bounds[low], "H5O_sdspace_ver_bounds"); - - /* Create null dataspace */ - sid_null = H5Screate(H5S_NULL); - CHECK(sid_null, H5I_INVALID_HID, "H5Screate"); - - /* Create a dataset with the null dataspace */ - did_null = H5Dcreate2(fid, DSET_NULL, H5T_NATIVE_INT, sid_null, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did_null, H5I_INVALID_HID, "H5Dcreate"); - - /* Get the internal dataspace pointer */ - sid_null = H5Dget_space(did_null); - CHECK(sid_null, H5I_INVALID_HID, "H5Dget_space"); - space_null = (H5S_t *)H5I_object(sid_null); - CHECK_PTR(space_null, "H5I_object"); - - /* Verify the dataspace version */ - VERIFY(space_null->extent.version, H5O_SDSPACE_VERSION_2, "H5O_sdspace_ver_bounds"); - - /* Close the datasets */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(did_null); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid_null); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a default file access property list which is used - to open the file in the 'for' loop */ 
- new_fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate"); - - /* Set up dataspace and dcpl for creating a chunked dataset */ - sid = H5Screate_simple(1, dims, max_dims); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 1, chunks); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Set up dataspace and dcpl for creating a compact dataset */ - sid_compact = H5Screate_simple(1, dims, NULL); - CHECK(sid_compact, H5I_INVALID_HID, "H5Screate_simple"); - dcpl_compact = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl_compact, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_layout(dcpl_compact, H5D_COMPACT); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Set up dataspace and dcpl for creating a contiguous dataset */ - sid_contig = H5Screate_simple(2, dims2, NULL); - CHECK(sid_contig, H5I_INVALID_HID, "H5Screate_simple"); - dcpl_contig = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl_contig, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_layout(dcpl_contig, H5D_CONTIGUOUS); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Loop through all the combinations of low/high bounds in new_fapl */ - /* Open the file and create the chunked/compact/contiguous datasets */ - /* Verify the dataspace message version for the three datasets */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - hid_t tmp_sid, tmp_sid_compact, tmp_sid_contig; /* Dataspace IDs */ - H5S_t *tmp_space, *tmp_space_compact, *tmp_space_contig; /* Internal dataspace pointers */ - - H5E_BEGIN_TRY - { - ret = H5Pset_libver_bounds(new_fapl, low, high); - } - H5E_END_TRY - - if (ret < 0) /* Invalid low/high combinations */ - continue; - - /* Open the file */ - H5E_BEGIN_TRY - { - fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); - } - H5E_END_TRY - - if (fid >= 0) { /* The file open succeeds */ - - /* Get the internal file pointer */ - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - - /* Create the chunked dataset */ - did = H5Dcreate2(fid, DSETA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - - /* Get the internal dataspace pointer for the chunked dataset */ - tmp_sid = H5Dget_space(did); - CHECK(tmp_sid, H5I_INVALID_HID, "H5Dget_space"); - tmp_space = (H5S_t *)H5I_object(tmp_sid); - CHECK_PTR(tmp_space, "H5I_object"); - - /* Create the compact dataset */ - did_compact = H5Dcreate2(fid, DSETB, H5T_NATIVE_INT, sid_compact, H5P_DEFAULT, dcpl_compact, - H5P_DEFAULT); - CHECK(did_compact, H5I_INVALID_HID, "H5Dcreate2"); - - /* Get the internal dataspace pointer for the compact dataset */ - tmp_sid_compact = H5Dget_space(did_compact); - CHECK(tmp_sid_compact, H5I_INVALID_HID, "H5Dget_space"); - tmp_space_compact = (H5S_t *)H5I_object(tmp_sid_compact); - CHECK_PTR(tmp_space_compact, "H5I_object"); - - /* Create the contiguous dataset */ - did_contig = - H5Dcreate2(fid, DSETC, H5T_NATIVE_INT, sid_contig, H5P_DEFAULT, dcpl_contig, H5P_DEFAULT); - CHECK(did_contig, H5I_INVALID_HID, "H5Dcreate2"); - - /* Get the internal dataspace pointer for the contiguous dataset */ - tmp_sid_contig = H5Dget_space(did_contig); - CHECK(tmp_sid_contig, H5I_INVALID_HID, "H5Dget_space"); - tmp_space_contig = (H5S_t *)H5I_object(tmp_sid_contig); - CHECK_PTR(tmp_space_contig, "H5I_object"); - - if (tmp_space) { - /* Verify versions for the three dataspaces */ - VERIFY(tmp_space->extent.version, 
H5O_sdspace_ver_bounds[f->shared->low_bound], - "H5O_sdspace_ver_bounds"); - } - if (tmp_space_compact) { - VERIFY(tmp_space_compact->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound], - "H5O_sdspace_ver_bounds"); - } - if (tmp_space_contig) { - VERIFY(tmp_space_contig->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound], - "H5O_sdspace_ver_bounds"); - } - - /* Close the three datasets */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(did_compact); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Dclose(did_contig); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the three dataspaces */ - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(tmp_sid_compact); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(tmp_sid_contig); - CHECK(ret, FAIL, "H5Sclose"); - - /* Delete the three datasets */ - ret = H5Ldelete(fid, DSETA, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSETB, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - ret = H5Ldelete(fid, DSETC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - } /* end if */ - } /* end for */ - } /* end for */ - - /* Close the file access property list */ - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the three dataspaces */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid_compact); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid_contig); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the three dataset creation property lists */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(dcpl_compact); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(dcpl_contig); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_libver_bounds_dataspace() */ - -/**************************************************************** -** -** test_libver_bounds_datatype(): -** Verify the datatype message version: -** -** (a) Create the following datatypes: -** 1) integer -** 2) enum -** 3) array -** 4) compound -** 5) vlen -** (b) Call test_libver_bounds_datatype_check() for each -** datatype in (a) to verify the datatype message version. 
-** -****************************************************************/ -static void -test_libver_bounds_datatype(hid_t fapl) -{ - hid_t tid = H5I_INVALID_HID, tid_enum = H5I_INVALID_HID, tid_array = H5I_INVALID_HID; /* Datatype IDs */ - hid_t tid_compound = H5I_INVALID_HID, tid_vlen = H5I_INVALID_HID; /* Datatype IDs */ - int enum_value; /* Value for enum datatype */ - typedef struct s1 { /* Data structure for compound datatype */ - char c; - int i; - } s1; - hsize_t dims[1] = {1}; /* Dimension sizes */ - herr_t ret; /* Return value */ - - /* Create integer datatype */ - tid = H5Tcopy(H5T_NATIVE_INT); - - /* Verify datatype message version */ - test_libver_bounds_datatype_check(fapl, tid); - - /* Create enum datatype */ - tid_enum = H5Tenum_create(tid); - enum_value = 0; - H5Tenum_insert(tid_enum, "val1", &enum_value); - enum_value = 1; - H5Tenum_insert(tid_enum, "val2", &enum_value); - - /* Verify datatype message version */ - test_libver_bounds_datatype_check(fapl, tid_enum); - - /* Create array datatype */ - tid_array = H5Tarray_create2(tid, 1, dims); - - /* Verify datatype message version */ - test_libver_bounds_datatype_check(fapl, tid_array); - - /* Create compound datatype */ - tid_compound = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - H5Tinsert(tid_compound, "c", HOFFSET(s1, c), H5T_STD_U8LE); - H5Tinsert(tid_compound, "i", HOFFSET(s1, i), H5T_NATIVE_INT); - - /* Verify datatype message version */ - test_libver_bounds_datatype_check(fapl, tid_compound); - - /* Create vlen datatype */ - tid_vlen = H5Tvlen_create(tid); - - /* Verify datatype message version */ - test_libver_bounds_datatype_check(fapl, tid_vlen); - - /* Close the datatypes */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Tclose(tid_enum); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Tclose(tid_array); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Tclose(tid_compound); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Tclose(tid_vlen); - CHECK(ret, FAIL, "H5Tclose"); - -} /* end test_libver_bounds_datatype() */ - -/**************************************************************** -** -** test_libver_bounds_datatype_check(): -** Helper routine called by test_libver_bounds_datatype() -** to verify the datatype message version for the input tid: -** -** (a) Create a file with default fcpl and the input fapl. -** Create a contiguous dataset with the input tid. -** Verify the datatype message version. -** Create a committed datatype of string to be -** used later. -** Close the file. -** -** (b) Create a new fapl that is set to the 5 pairs of low/high -** bounds in a "for" loop. 
For each pair of setting in -** the new fapl: -** --Open the same file in (a) with the fapl -** --Verify the message version for the committed -** datatype created earlier -** --Create a chunked dataset with the input tid -** --Verify the datatype message version -** --Close and delete the dataset -** --Close the file -** -****************************************************************/ -static void -test_libver_bounds_datatype_check(hid_t fapl, hid_t tid) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ - hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */ - hid_t dtid = H5I_INVALID_HID; /* Datatype ID for the dataset */ - hid_t str_tid = H5I_INVALID_HID; /* String datatype ID */ - hid_t did = H5I_INVALID_HID; /* Dataset ID */ - hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ - hsize_t dims[1] = {1}; /* Dimension sizes */ - hsize_t dims2[2] = {5, 4}; /* Dimension sizes */ - hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */ - hsize_t chunks[2] = {2, 3}; /* Chunk dimension sizes */ - H5T_t *dtype = NULL; /* Internal datatype pointer */ - H5T_t *str_dtype = NULL; /* Internal datatype pointer for the string datatype */ - H5F_t *f = NULL; /* Internal file pointer */ - H5F_libver_t low, high; /* Low and high bounds */ - herr_t ret; /* Return value */ - - /* Retrieve the low/high version bounds from the input fapl */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - /* Create the file with the input fapl */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create a committed datatype of string which will be used - later inside the 'for' loop */ - str_tid = H5Tcopy(H5T_C_S1); - CHECK(str_tid, H5I_INVALID_HID, "H5Tcopy"); - ret = H5Tset_size(str_tid, (size_t)10); - CHECK(ret, FAIL, "H5Tset_size"); - ret = H5Tcommit2(fid, "datatype", str_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - ret = H5Tclose(str_tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create dataspace */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset with the input tid */ - did = H5Dcreate2(fid, DSET1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - - /* Get the dataset's datatype */ - dtid = H5Dget_type(did); - CHECK(dtid, H5I_INVALID_HID, "H5Dget_type"); - - /* Get the internal datatype pointer */ - dtype = (H5T_t *)H5I_object(dtid); - CHECK_PTR(dtype, "H5I_object"); - - /* Verify the datatype message version */ - /* H5T_COMPOUND, H5T_ENUM, H5T_ARRAY: - * --the library will set version according to low_bound - * --H5T_ARRAY: the earliest version the library will set is 2 - * H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, H5T_BITFIELD, H5T_OPAQUE, H5T_REFERENCE: - * --the library will only use basic version - */ - - if (dtype->shared->type == H5T_COMPOUND || dtype->shared->type == H5T_ENUM || - dtype->shared->type == H5T_ARRAY) { - if (dtype->shared->type == H5T_ARRAY && low == H5F_LIBVER_EARLIEST) - VERIFY(dtype->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds"); - else - VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[low], "H5O_dtype_ver_bounds"); - } - else - VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST], "H5O_dtype_ver_bounds"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* 
Close the dataspace */
-    ret = H5Sclose(sid);
-    CHECK(ret, FAIL, "H5Sclose");
-
-    /* Close the datatype */
-    ret = H5Tclose(dtid);
-    CHECK(ret, FAIL, "H5Tclose");
-
-    /* Close the file */
-    ret = H5Fclose(fid);
-    CHECK(ret, FAIL, "H5Fclose");
-
-    /* Create a default file access property list */
-    new_fapl = H5Pcreate(H5P_FILE_ACCESS);
-    CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate");
-
-    /* Set up dataspace and dcpl for creating a chunked dataset */
-    sid = H5Screate_simple(2, dims2, max_dims2);
-    CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
-    dcpl = H5Pcreate(H5P_DATASET_CREATE);
-    CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
-    ret = H5Pset_chunk(dcpl, 2, chunks);
-    CHECK(ret, FAIL, "H5Pset_chunk");
-
-    /* Loop through all the combinations of low/high bounds */
-    /* Open the file and create the chunked dataset with the input tid */
-    /* Verify the dataset's datatype message version */
-    /* Also verify the committed datatype message version */
-    for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
-        for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
-            H5E_BEGIN_TRY
-            {
-                ret = H5Pset_libver_bounds(new_fapl, low, high);
-            }
-            H5E_END_TRY
-
-            if (ret < 0) /* Invalid low/high combinations */
-                continue;
-
-            /* Open the file */
-            H5E_BEGIN_TRY
-            {
-                fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
-            }
-            H5E_END_TRY
-
-            if (fid >= 0) { /* The file open succeeds */
-
-                /* Get the internal file pointer */
-                f = (H5F_t *)H5VL_object(fid);
-                CHECK_PTR(f, "H5VL_object");
-
-                /* Open the committed datatype */
-                str_tid = H5Topen2(fid, "datatype", H5P_DEFAULT);
-                CHECK(str_tid, FAIL, "H5Topen2");
-                str_dtype = (H5T_t *)H5VL_object(str_tid);
-                CHECK_PTR(str_dtype, "H5VL_object");
-
-                /* Verify the committed datatype message version */
-                VERIFY(str_dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST],
-                       "H5O_dtype_ver_bounds");
-
-                /* Close the committed datatype */
-                ret = H5Tclose(str_tid);
-                CHECK(ret, FAIL, "H5Tclose");
-
-                /* Create the chunked dataset */
-                did = H5Dcreate2(fid, DSETNAME, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
-                CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
-
-                /* Get the dataset's datatype */
-                dtid = H5Dget_type(did);
-                CHECK(dtid, H5I_INVALID_HID, "H5Dget_type");
-
-                /* Get the internal datatype pointer */
-                dtype = (H5T_t *)H5I_object(dtid);
-                CHECK_PTR(dtype, "H5I_object");
-
-                if (dtype) {
-                    /* Verify the dataset's datatype message version */
-                    /* H5T_COMPOUND, H5T_ENUM, H5T_ARRAY:
-                     *  --the library will set version according to low_bound
-                     *  --H5T_ARRAY: the earliest version the library will set is 2
-                     * H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, H5T_BITFIELD, H5T_OPAQUE, H5T_REFERENCE:
-                     *  --the library will only use basic version
-                     */
-                    if (dtype->shared->type == H5T_COMPOUND || dtype->shared->type == H5T_ENUM ||
-                        dtype->shared->type == H5T_ARRAY) {
-                        if (dtype->shared->type == H5T_ARRAY && f->shared->low_bound == H5F_LIBVER_EARLIEST)
-                            VERIFY(dtype->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds");
-                        else
-                            VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[f->shared->low_bound],
-                                   "H5O_dtype_ver_bounds");
-                    }
-                    else
-                        VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST],
-                               "H5O_dtype_ver_bounds");
-                }
-
-                /* Close the dataset */
-                ret = H5Dclose(did);
-                CHECK(ret, FAIL, "H5Dclose");
-
-                /* Close the dataset's datatype */
-                ret = H5Tclose(dtid);
-                CHECK(ret, FAIL, "H5Tclose");
-
-                /* Delete the dataset */
-                ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT);
-                CHECK(ret, FAIL, "H5Ldelete");
-
-                /* 
Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - } /* end if */ - } /* end for */ - } /* end for */ - - /* Close the file access property list */ - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_libver_bounds_datatype_check() */ - -/**************************************************************** -** -** test_libver_bounds_attributes(): -** Verify the attribute message versions: -** -** (a) Create a file with default fcpl and the input fapl. -** Create a group and attach the following three attributes -** to the group: -** (1) Attribute with a committed datatype -** (2) Attribute with integer type -** (3) Attribute with character encoding set -** Verify the three attributes' message versions. -** Close the file. -** -** (b) Create a fcpl that has shared datatype message enabled. -** Create a file with the fcpl and the input fapl. -** Create a group and attach an attribute with shared -** integer type to the group. -** Verify the attribute message version. -** Close the file -** -** (b) Create a new fapl that is set to the 5 pairs of low/high -** bounds in a "for" loop. For each pair of setting in -** the new fapl: -** --Open the same file in (b) with the fapl -** --Open the group and attach an attribute with integer -** type to the group -** --Verify the attribute message version -** --Delete the attribute -** --Close the group and the file -** -****************************************************************/ -static void -test_libver_bounds_attributes(hid_t fapl) -{ - hid_t fid = H5I_INVALID_HID; /* File ID */ - hid_t fcpl = H5I_INVALID_HID; /* File creation property list */ - hid_t new_fapl = H5I_INVALID_HID; /* File access property list */ - hid_t tid = H5I_INVALID_HID; /* Datatype ID */ - hid_t gid = H5I_INVALID_HID; /* Group ID */ - hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ - hid_t aid = H5I_INVALID_HID; /* Attribute ID */ - hid_t attr_cpl = H5I_INVALID_HID; /* Attribute creation property list */ - H5A_t *attr = NULL; /* Internal attribute pointer */ - H5F_t *f = NULL; /* Internal file pointer */ - H5F_libver_t low, high; /* Low and high bounds */ - herr_t ret; /* Return value */ - - /* Retrieve the low/high bounds from the input fapl */ - ret = H5Pget_libver_bounds(fapl, &low, &high); - CHECK(ret, FAIL, "H5Pget_libver_bounds"); - - /* Create the file */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Integer datatype */ - tid = H5Tcopy(H5T_NATIVE_INT); - CHECK(tid, H5I_INVALID_HID, "H5Tcopy"); - - /* Create a committed datatype */ - ret = H5Tcommit2(fid, "datatype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* Create a group */ - gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, H5I_INVALID_HID, "H5Gcreate2"); - - /* Attach an attribute to the group with the committed datatype */ - aid = H5Acreate2(gid, "attr1", tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); - - /* Get the internal attribute pointer */ - attr = (H5A_t *)H5VL_object(aid); - CHECK_PTR(attr, "H5VL_object"); - - /* Verify the attribute version */ - if (low == H5F_LIBVER_EARLIEST) - /* The earliest 
version the library can set for an attribute with committed datatype is 2 */ - VERIFY(attr->shared->version, H5O_ATTR_VERSION_2, "H5O_attr_ver_bounds"); - else - VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create an attribute to the group with integer type */ - aid = H5Acreate2(gid, "attr2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Get the internal attribute pointer */ - attr = (H5A_t *)H5VL_object(aid); - CHECK_PTR(attr, "H5VL_object"); - - /* Verify attribute version */ - VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Enable character encoding in attribute creation property list */ - attr_cpl = H5Pcreate(H5P_ATTRIBUTE_CREATE); - CHECK(attr_cpl, H5I_INVALID_HID, "H5Pcreate"); - ret = H5Pset_char_encoding(attr_cpl, H5T_CSET_UTF8); - CHECK(ret, FAIL, "H5Pset_char_encoding"); - - /* Attach an attribute to the group with character encoding set */ - aid = H5Acreate2(gid, "attr3", H5T_NATIVE_INT, sid, attr_cpl, H5P_DEFAULT); - CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); - - /* Get internal attribute pointer */ - attr = (H5A_t *)H5VL_object(aid); - CHECK_PTR(attr, "H5VL_object"); - - /* Verify attribute version */ - if (low == H5F_LIBVER_EARLIEST) - /* The earliest version the library can set for an attribute with character encoding is 3 */ - VERIFY(attr->shared->version, H5O_ATTR_VERSION_3, "H5O_attr_ver_bounds"); - else - VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close the attribute creation property list */ - ret = H5Pclose(attr_cpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a copy of the file creation property list */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate"); - - /* Enable shared datatype message */ - ret = H5Pset_shared_mesg_nindexes(fcpl, 1); - CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes"); - ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_DTYPE_FLAG, 2); - CHECK(ret, FAIL, "H5Pset_shared_mesg_index"); - - /* Create the file with shared datatype message enabled */ - fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create an integer datatype */ - tid = H5Tcopy(H5T_NATIVE_INT); - CHECK(tid, H5I_INVALID_HID, "H5Tcopy"); - - /* Create dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* Create a group */ - gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, H5I_INVALID_HID, "H5Gcreate2"); - - /* Attach an attribute to the group with shared integer datatype */ - aid = H5Acreate2(gid, ATTR_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); - - /* Get the internal attribute pointer */ - attr = (H5A_t *)H5VL_object(aid); - CHECK_PTR(attr, "H5VL_object"); - - /* Verify the attribute version */ - if (low == H5F_LIBVER_EARLIEST) - /* The 
earliest version the library can set for an attribute with shared datatype is 2 */ - VERIFY(attr->shared->version, H5O_ATTR_VERSION_2, "H5O_attr_ver_bounds"); - else - VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close the group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a default file access property list */ - new_fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(new_fapl, FAIL, "H5Pcreate"); - - /* Create a scalar dataspace to be used later for the attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* Loop through all the combinations of low/high bounds */ - /* Open the file and group and attach an attribute to the group */ - /* Verify the attribute version */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - H5E_BEGIN_TRY - { - ret = H5Pset_libver_bounds(new_fapl, low, high); - } - H5E_END_TRY - - if (ret < 0) /* Invalid low/high combinations */ - continue; - - /* Open the file */ - H5E_BEGIN_TRY - { - fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl); - } - H5E_END_TRY - - if (fid >= 0) { /* The file open succeeds */ - - /* Get the internal file pointer */ - f = (H5F_t *)H5VL_object(fid); - CHECK_PTR(f, "H5VL_object"); - - /* Open the group */ - gid = H5Gopen2(fid, GRP_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Attach an attribute to the group */ - aid = H5Acreate2(gid, "attr1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Get the internal attribute pointer */ - attr = (H5A_t *)H5VL_object(aid); - CHECK_PTR(attr, "H5VL_object"); - - /* Verify the attribute message version */ - VERIFY(attr->shared->version, H5O_attr_ver_bounds[f->shared->low_bound], - "H5O_attr_ver_bounds"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Delete the attribute */ - ret = H5Adelete(gid, "attr1"); - CHECK(ret, FAIL, "H5Adelete"); - - /* Close the group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - } /* end if */ - } /* end for */ - } /* end for */ - - /* Close the file access property list */ - ret = H5Pclose(new_fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* end test_libver_bounds_attributes() */ - -/**************************************************************** -** -** test_libver_macros(): -** Verify that H5_VERSION_GE and H5_VERSION_LE work correactly. 
-**
-****************************************************************/
-static void
-test_libver_macros(void)
-{
-    int major   = H5_VERS_MAJOR;
-    int minor   = H5_VERS_MINOR;
-    int release = H5_VERS_RELEASE;
-
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing macros for library version comparison\n"));
-
-    VERIFY(H5_VERSION_GE(major, minor, release), true, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major - 1, minor, release), true, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major - 1, minor + 1, release), true, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major - 1, minor, release + 1), true, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major, minor - 1, release), true, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major, minor - 1, release + 1), true, "H5_VERSION_GE");
-    if (H5_VERS_RELEASE > 0)
-        VERIFY(H5_VERSION_GE(major, minor, release - 1), true, "H5_VERSION_GE");
-
-    VERIFY(H5_VERSION_GE(major + 1, minor, release), false, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major + 1, minor - 1, release), false, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major + 1, minor - 1, release - 1), false, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major, minor + 1, release), false, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major, minor + 1, release - 1), false, "H5_VERSION_GE");
-    VERIFY(H5_VERSION_GE(major, minor, release + 1), false, "H5_VERSION_GE");
-
-    VERIFY(H5_VERSION_LE(major, minor, release), true, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major + 1, minor, release), true, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major + 1, minor - 1, release), true, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major + 1, minor - 1, release - 1), true, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major, minor + 1, release), true, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major, minor + 1, release - 1), true, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major, minor, release + 1), true, "H5_VERSION_LE");
-
-    VERIFY(H5_VERSION_LE(major - 1, minor, release), false, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major - 1, minor + 1, release), false, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major - 1, minor + 1, release + 1), false, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major, minor - 1, release), false, "H5_VERSION_LE");
-    VERIFY(H5_VERSION_LE(major, minor - 1, release + 1), false, "H5_VERSION_LE");
-    if (H5_VERS_RELEASE > 0)
-        VERIFY(H5_VERSION_LE(major, minor, release - 1), false, "H5_VERSION_LE");
-} /* test_libver_macros() */
-
-/****************************************************************
-**
-** test_libver_macros2():
-**  Verify that H5_VERSION_GE works correctly and show how
-**  to use it.
-**
-****************************************************************/
-static void
-test_libver_macros2(void)
-{
-    hid_t  file;
-    hid_t  grp;
-    htri_t status;
-    herr_t ret; /* Return value */
-
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing macros for library version comparison with a file\n"));
-
-    /*
-     * Create a file.
-     */
-    file = H5Fcreate(FILE6, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-    CHECK(file, FAIL, "H5Fcreate");
-
-    /*
-     * Create a group in the file.
-     */
-    grp = H5Gcreate2(file, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-    CHECK(grp, FAIL, "H5Gcreate2");
-
-    /*
-     * Close the group
-     */
-    ret = H5Gclose(grp);
-    CHECK(ret, FAIL, "H5Gclose");
-
-    /*
-     * Delete the group using a different function based on the library version,
-     * and verify the action.
- */ -#if H5_VERSION_GE(1, 8, 0) - ret = H5Ldelete(file, "Group", H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lunlink"); - - status = H5Lexists(file, "Group", H5P_DEFAULT); - VERIFY(status, false, "H5Lexists"); -#else - ret = H5Gunlink(file, "Group"); - CHECK(ret, FAIL, "H5Gunlink"); - - H5E_BEGIN_TRY - { - grp = H5Gopen(file, "Group"); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Gopen"); -#endif - - /* - * Close the file. - */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_libver_macros2() */ -#endif - -#if 0 -/**************************************************************** -** -** test_filesize(): -** Verify H5Fincrement_filesize() and H5Fget_eoa() works as -** indicated in the "RFC: Enhancement to the tool h5clear". -** -****************************************************************/ -static void -test_incr_filesize(void) -{ - hid_t fid; /* File opened with read-write permission */ - h5_stat_size_t filesize; /* Size of file when empty */ - hid_t fcpl; /* File creation property list */ - hid_t fapl; /* File access property list */ - hid_t dspace; /* Dataspace ID */ - hid_t dset; /* Dataset ID */ - hid_t dcpl; /* Dataset creation property list */ - unsigned u; /* Local index variable */ - char filename[FILENAME_LEN]; /* Filename to use */ - char name[32]; /* Dataset name */ - haddr_t stored_eoa; /* The stored EOA value */ - hid_t driver_id = -1; /* ID for this VFD */ - unsigned long driver_flags = 0; /* VFD feature flags */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Fincrement_filesize() and H5Fget_eoa())\n")); - - fapl = h5_fileaccess(); - h5_fixname(FILE8, fapl, filename, sizeof filename); - - /* Get the VFD feature flags */ - driver_id = H5Pget_driver(fapl); - CHECK(driver_id, FAIL, "H5Pget_driver"); - - ret = H5FDdriver_query(driver_id, &driver_flags); - CHECK(ret, FAIL, "H5PDdriver_query"); - - /* Check whether the VFD feature flag supports these two public routines */ - if (driver_flags & H5FD_FEAT_SUPPORTS_SWMR_IO) { - - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Set file space strategy */ - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, false, (hsize_t)1); - CHECK(ret, FAIL, "H5P_set_file_space_strategy"); - - /* Create the test file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - dset = H5Dcreate2(fid, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ - - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Get the file size */ - filesize = h5_get_file_size(filename, fapl); - - /* Open the file */ - fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Get the 
stored EOA */ - ret = H5Fget_eoa(fid, &stored_eoa); - CHECK(ret, FAIL, "H5Fget_eoa"); - - /* Verify the stored EOA is the same as filesize */ - VERIFY(filesize, stored_eoa, "file size"); - - /* Set the EOA to the MAX(EOA, EOF) + 512 */ - ret = H5Fincrement_filesize(fid, 512); - CHECK(ret, FAIL, "H5Fincrement_filesize"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Get the file size */ - filesize = h5_get_file_size(filename, fapl); - - /* Verify the filesize is the previous stored_eoa + 512 */ - VERIFY(filesize, stored_eoa + 512, "file size"); - - /* Close the file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the file creation property list */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - } -} /* end test_incr_filesize() */ -#endif - -/**************************************************************** -** -** test_min_dset_ohdr(): -** Test API calls to toggle dataset object header minimization. -** -** TODO (as separate function?): -** + setting persists between close and (re)open? -** + dataset header sizes created while changing value of toggle -** -****************************************************************/ -#if 0 -static void -test_min_dset_ohdr(void) -{ - const char basename[] = "min_dset_ohdr_testfile"; - char filename[FILENAME_LEN] = ""; - hid_t file_id = -1; - hid_t file2_id = -1; - bool minimize; - herr_t ret; - - MESSAGE(5, ("Testing dataset object header minimization\n")); - - /*********/ - /* SETUP */ - /*********/ - - h5_fixname(basename, H5P_DEFAULT, filename, sizeof(filename)); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK_I(file_id, "H5Fcreate"); - - /*********/ - /* TESTS */ - /*********/ - - /*---------------------------------------- - * TEST default value - */ - ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, false, "minimize flag"); - - /*---------------------------------------- - * TEST set to true - */ - ret = H5Fset_dset_no_attrs_hint(file_id, true); - CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint"); - - ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, true, "minimize flag"); - - /*---------------------------------------- - * TEST second file open on same filename - */ - file2_id = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK_I(file2_id, "H5Fopen"); - - /* verify true setting on second open - */ - ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, true, "minimize flag"); - - /* re-set to false on first open - */ - ret = H5Fset_dset_no_attrs_hint(file_id, false); - CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint"); - - /* verify false set on both opens - */ - ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, false, "minimize flag"); - - ret = H5Fget_dset_no_attrs_hint(file2_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, false, "minimize flag"); - - /* re-set to true on second open - */ - ret = H5Fset_dset_no_attrs_hint(file2_id, true); - CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint"); - - /* verify true set on both opens - */ - ret = H5Fget_dset_no_attrs_hint(file_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, true, "minimize flag"); - - ret = 
H5Fget_dset_no_attrs_hint(file2_id, &minimize); - CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - VERIFY(minimize, true, "minimize flag"); - - /*---------------------------------------- - * TEST error cases - */ - - /* trying to set with invalid file ID */ - H5E_BEGIN_TRY - { - ret = H5Fset_dset_no_attrs_hint(-1, true); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fset_dset_no_attrs_hint"); - - /* trying to get with invalid file ID */ - H5E_BEGIN_TRY - { - ret = H5Fget_dset_no_attrs_hint(-1, &minimize); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - - /* trying to get with invalid pointer */ - H5E_BEGIN_TRY - { - ret = H5Fget_dset_no_attrs_hint(file_id, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fget_dset_no_attrs_hint"); - - /************/ - /* TEARDOWN */ - /************/ - - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(file2_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_min_dset_ohdr() */ -#endif - -/**************************************************************** -** -** test_deprec(): -** Test deprecated functionality. -** -****************************************************************/ -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS -static void -test_deprec(const char *env_h5_drvr) -{ - hid_t file; /* File IDs for old & new files */ - hid_t fcpl; /* File creation property list */ - hid_t fapl; /* File creation property list */ - hid_t new_fapl; - hsize_t align; - unsigned super; /* Superblock version # */ - unsigned freelist; /* Free list version # */ - unsigned stab; /* Symbol table entry version # */ - unsigned shhdr; /* Shared object header version # */ - H5F_info1_t finfo; /* global information about file */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing deprecated routines\n")); - - /* Creating a file with the default file creation property list should - * create a version 0 superblock - */ - - /* Create file with default file creation property list */ - file = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Get the file's version information */ - ret = H5Fget_info1(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info1"); - VERIFY(finfo.super_ext_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1"); - - /* Get the file's dataset creation property list */ - fcpl = H5Fget_create_plist(file); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - /* Get the file's version information */ - ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); - CHECK(ret, FAIL, "H5Pget_version"); - VERIFY(super, 0, "H5Pget_version"); - VERIFY(freelist, 0, "H5Pget_version"); - VERIFY(stab, 0, "H5Pget_version"); - VERIFY(shhdr, 0, "H5Pget_version"); - - /* Close FCPL */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Only run this part of the test with the sec2/default driver */ - if (h5_using_default_driver(env_h5_drvr)) { - /* Create a file creation property list */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Set a property in the FCPL that will push the superblock version up */ - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0); - ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512); - CHECK(ret, FAIL, "H5Pset_file_space_strategy"); 
- - fapl = H5Pcreate(H5P_FILE_ACCESS); - ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024); - CHECK(ret, FAIL, "H5Pset_alignment"); - - /* Creating a file with the non-default file creation property list should - * create a version 2 superblock - */ - - /* Create file with custom file creation property list */ - file = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - new_fapl = H5Fget_access_plist(file); - H5Pget_alignment(new_fapl, NULL, &align); - - /* Close FCPL */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Get the file's version information */ - ret = H5Fget_info1(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info1"); - VERIFY(finfo.super_ext_size, 152, "H5Fget_info1"); - VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1"); - - /* Get the file's dataset creation property list */ - fcpl = H5Fget_create_plist(file); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - /* Get the file's version information */ - ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); - CHECK(ret, FAIL, "H5Pget_version"); - VERIFY(super, 2, "H5Pget_version"); - VERIFY(freelist, 0, "H5Pget_version"); - VERIFY(stab, 0, "H5Pget_version"); - VERIFY(shhdr, 0, "H5Pget_version"); - - /* Close FCPL */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - file = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Get the file's version information */ - ret = H5Fget_info1(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info1"); - VERIFY(finfo.super_ext_size, 152, "H5Fget_info1"); - VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1"); - VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1"); - - /* Get the file's creation property list */ - fcpl = H5Fget_create_plist(file); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - /* Get the file's version information */ - ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); - CHECK(ret, FAIL, "H5Pget_version"); - VERIFY(super, 2, "H5Pget_version"); - VERIFY(freelist, 0, "H5Pget_version"); - VERIFY(stab, 0, "H5Pget_version"); - VERIFY(shhdr, 0, "H5Pget_version"); - - /* Close FCPL */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - { /* Test deprecated H5Pget/set_file_space() */ - - H5F_file_space_type_t old_strategy; - hsize_t old_threshold; - hid_t fid; - hid_t ffcpl; - - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold); - CHECK(ret, FAIL, "H5Pget_file_space"); - VERIFY(old_strategy, H5F_FILE_SPACE_ALL, "H5Pget_file_space"); - VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space"); - - /* Set file space strategy and free space section threshold */ - ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0); - CHECK(ret, FAIL, "H5Pget_file_space"); - - /* Get the file space info from the creation property */ - ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold); - CHECK(ret, FAIL, "H5Pget_file_space"); - VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); - VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space"); - - ret = H5Pset_file_space(fcpl, 
H5F_FILE_SPACE_DEFAULT, (hsize_t)3); - CHECK(ret, FAIL, "H5Pget_file_space"); - - ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold); - CHECK(ret, FAIL, "H5Pget_file_space"); - VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); - VERIFY(old_threshold, 3, "H5Pget_file_space"); - - /* Create a file */ - fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - old_strategy = H5F_FILE_SPACE_DEFAULT; - old_threshold = 0; - ffcpl = H5Fget_create_plist(fid); - ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold); - CHECK(ret, FAIL, "H5Pget_file_space"); - VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); - VERIFY(old_threshold, 3, "H5Pget_file_space"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Pclose(ffcpl); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Reopen the file */ - fid = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - old_strategy = H5F_FILE_SPACE_DEFAULT; - old_threshold = 0; - ffcpl = H5Fget_create_plist(fid); - ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold); - CHECK(ret, FAIL, "H5Pget_file_space"); - VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space"); - VERIFY(old_threshold, 3, "H5Pget_file_space"); - - ret = H5Pclose(ffcpl); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - } - -} /* test_deprec */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - -/**************************************************************** -** -** test_file(): Main low-level file I/O test routine. -** -****************************************************************/ -void -test_file(void) -{ - const char *env_h5_drvr; /* File Driver value from environment */ - hid_t fapl_id = H5I_INVALID_HID; /* VFD-dependent fapl ID */ - bool driver_is_default_compatible; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Low-Level File I/O\n")); - - /* Get the VFD to use */ - env_h5_drvr = getenv(HDF5_DRIVER); - if (env_h5_drvr == NULL) - env_h5_drvr = "nomatch"; - - /* Improved version of VFD-dependent checks */ - fapl_id = h5_fileaccess(); - CHECK(fapl_id, H5I_INVALID_HID, "h5_fileaccess"); - - ret = h5_driver_is_default_vfd_compatible(fapl_id, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - test_file_create(); /* Test file creation(also creation templates)*/ - test_file_open(env_h5_drvr); /* Test file opening */ - test_file_reopen(); /* Test file reopening */ - test_file_close(); /* Test file close behavior */ - test_get_file_id(); /* Test H5Iget_file_id */ - test_get_obj_ids(); /* Test H5Fget_obj_ids for Jira Issue 8528 */ - test_file_perm(); /* Test file access permissions */ - test_file_perm2(); /* Test file access permission again */ - test_file_is_accessible(env_h5_drvr); /* Test detecting HDF5 files correctly */ - test_file_delete(fapl_id); /* Test H5Fdelete */ - test_file_open_dot(); /* Test opening objects with "." 
for a name */ - test_file_open_overlap(); /* Test opening files in an overlapping manner */ - test_file_getname(); /* Test basic H5Fget_name() functionality */ - test_file_double_root_open(); /* Test opening root group from two files works properly */ - test_file_double_group_open(); /* Test opening same group from two files works properly */ - test_file_double_dataset_open(); /* Test opening same dataset from two files works properly */ - test_file_double_datatype_open(); /* Test opening same named datatype from two files works properly */ - test_file_double_file_dataset_open(true); - test_file_double_file_dataset_open(false); -#if 0 - test_userblock_file_size( - env_h5_drvr); /* Tests that files created with a userblock have the correct size */ - test_cached_stab_info(); /* Tests that files are created with cached stab info in the superblock */ - - if (driver_is_default_compatible) { - test_rw_noupdate(); /* Test to ensure that RW permissions don't write the file unless dirtied */ - } - - test_userblock_alignment( - env_h5_drvr); /* Tests that files created with a userblock and alignment interact properly */ - test_userblock_alignment_paged(env_h5_drvr); /* Tests files created with a userblock and alignment (via - paged aggregation) interact properly */ - test_filespace_info(env_h5_drvr); /* Test file creation public routines: */ - /* H5Pget/set_file_space_strategy() & H5Pget/set_file_space_page_size() */ - /* Skipped testing for multi/split drivers */ - test_file_freespace(env_h5_drvr); /* Test file public routine H5Fget_freespace() */ - /* Skipped testing for multi/split drivers */ - /* Setup for multi/split drivers are there already */ - test_sects_freespace(env_h5_drvr, - true); /* Test file public routine H5Fget_free_sections() for new format */ - /* Skipped testing for multi/split drivers */ - /* Setup for multi/split drivers are there already */ - test_sects_freespace(env_h5_drvr, false); /* Test file public routine H5Fget_free_sections() */ - /* Skipped testing for multi/split drivers */ - - if (driver_is_default_compatible) { - test_filespace_compatible(); /* Test compatibility for file space management */ - - test_filespace_round_compatible(); /* Testing file space compatibility for files from trunk to 1_8 to - trunk */ - test_filespace_1_10_0_compatible(); /* Testing file space compatibility for files from release 1.10.0 - */ - } - - test_libver_bounds(); /* Test compatibility for file space management */ - test_libver_bounds_low_high(env_h5_drvr); - test_libver_macros(); /* Test the macros for library version comparison */ - test_libver_macros2(); /* Test the macros for library version comparison */ - test_incr_filesize(); /* Test H5Fincrement_filesize() and H5Fget_eoa() */ - test_min_dset_ohdr(); /* Test dataset object header minimization */ -#ifndef H5_NO_DEPRECATED_SYMBOLS - test_file_ishdf5(env_h5_drvr); /* Test detecting HDF5 files correctly */ - test_deprec(env_h5_drvr); /* Test deprecated routines */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); - -} /* test_file() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_file - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_file(void) -{ - H5E_BEGIN_TRY - { - H5Fdelete(SFILE1, H5P_DEFAULT); - H5Fdelete(FILE1, H5P_DEFAULT); - H5Fdelete(FILE2, H5P_DEFAULT); - H5Fdelete(FILE3, H5P_DEFAULT); - 
H5Fdelete(FILE4, H5P_DEFAULT); - H5Fdelete(FILE5, H5P_DEFAULT); - H5Fdelete(FILE6, H5P_DEFAULT); - H5Fdelete(FILE7, H5P_DEFAULT); - H5Fdelete(DST_FILE, H5P_DEFAULT); - } - H5E_END_TRY -} diff --git a/test/API/tgenprop.c b/test/API/tgenprop.c deleted file mode 100644 index 9a1f551819f..00000000000 --- a/test/API/tgenprop.c +++ /dev/null @@ -1,2195 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tgenprop - * - * Test the Generic Property functionality - * - *************************************************************/ - -#define H5P_FRIEND /*suppress error about including H5Ppkg */ - -/* Define this macro to indicate that the testing APIs should be available */ -#define H5P_TESTING - -#include "testhdf5.h" - -/* #include "H5Dprivate.h" */ /* For Dataset creation property list names */ -/* #include "H5Ppkg.h" */ /* Generic Properties */ - -#define FILENAME "tgenprop.h5" - -/* Property definitions */ -#define CLASS1_NAME "Class 1" -#define CLASS1_PATH "root/Class 1" - -#define CLASS2_NAME "Class 2" -#define CLASS2_PATH "root/Class 1/Class 2" - -/* Property definitions */ -#define PROP1_NAME "Property 1" -int prop1_def = 10; /* Property 1 default value */ -#define PROP1_SIZE sizeof(prop1_def) -#define PROP1_DEF_VALUE (&prop1_def) - -#define PROP2_NAME "Property 2" -float prop2_def = 3.14F; /* Property 2 default value */ -#define PROP2_SIZE sizeof(prop2_def) -#define PROP2_DEF_VALUE (&prop2_def) - -#define PROP3_NAME "Property 3" -char prop3_def[10] = "Ten chars"; /* Property 3 default value */ -#define PROP3_SIZE sizeof(prop3_def) -#define PROP3_DEF_VALUE (&prop3_def) - -#define PROP4_NAME "Property 4" -double prop4_def = 1.41; /* Property 4 default value */ -#define PROP4_SIZE sizeof(prop4_def) -#define PROP4_DEF_VALUE (&prop4_def) - -/* Structs used during iteration */ -typedef struct iter_data_t { - int iter_count; - char **names; -} iter_data_t; - -typedef struct count_data_t { - int count; - hid_t id; -} count_data_t; - -/**************************************************************** -** -** test_genprop_basic_class(): Test basic generic property list code. -** Tests creating new generic classes. 
-** -****************************************************************/ -static void -test_genprop_basic_class(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t cid2; /* Generic Property class ID */ - hid_t cid3; /* Generic Property class ID */ - char *name; /* Name of class */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Class Creation Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Check class name */ - name = H5Pget_class_name(cid1); - CHECK_PTR(name, "H5Pget_class_name"); - if (strcmp(name, CLASS1_NAME) != 0) - TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); - H5free_memory(name); - - /* Check class parent */ - cid2 = H5Pget_class_parent(cid1); - CHECK_I(cid2, "H5Pget_class_parent"); - - /* Verify class parent correct */ - ret = H5Pequal(cid2, H5P_ROOT); - VERIFY(ret, 1, "H5Pequal"); - - /* Make certain false positives aren't being returned */ - ret = H5Pequal(cid2, H5P_FILE_CREATE); - VERIFY(ret, 0, "H5Pequal"); - - /* Close parent class */ - ret = H5Pclose_class(cid2); - CHECK_I(ret, "H5Pclose_class"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - - /* Create another new generic class, derived from file creation class */ - cid1 = H5Pcreate_class(H5P_FILE_CREATE, CLASS2_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Check class name */ - name = H5Pget_class_name(cid1); - CHECK_PTR(name, "H5Pget_class_name"); - if (strcmp(name, CLASS2_NAME) != 0) - TestErrPrintf("Class names don't match!, name=%s, CLASS2_NAME=%s\n", name, CLASS2_NAME); - H5free_memory(name); - - /* Check class parent */ - cid2 = H5Pget_class_parent(cid1); - CHECK_I(cid2, "H5Pget_class_parent"); - - /* Verify class parent correct */ - ret = H5Pequal(cid2, H5P_FILE_CREATE); - VERIFY(ret, 1, "H5Pequal"); - - /* Check class parent's parent */ - cid3 = H5Pget_class_parent(cid2); - CHECK_I(cid3, "H5Pget_class_parent"); - - /* Verify class parent's parent correct */ - ret = H5Pequal(cid3, H5P_GROUP_CREATE); - VERIFY(ret, 1, "H5Pequal"); - - /* Close parent class's parent */ - ret = H5Pclose_class(cid3); - CHECK_I(ret, "H5Pclose_class"); - - /* Close parent class */ - ret = H5Pclose_class(cid2); - CHECK_I(ret, "H5Pclose_class"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); -} /* end test_genprop_basic_class() */ - -/**************************************************************** -** -** test_genprop_basic_class_prop(): Test basic generic property list code. -** Tests adding properties to generic classes. 
-** -****************************************************************/ -static void -test_genprop_basic_class_prop(void) -{ - hid_t cid1; /* Generic Property class ID */ - size_t size; /* Size of property */ - size_t nprops; /* Number of properties in class */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Class Properties Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 0, "H5Pget_nprops"); - - /* Check the existence of the first property (should fail) */ - ret = H5Pexist(cid1, PROP1_NAME); - VERIFY(ret, 0, "H5Pexist"); - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Try to insert the first property again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, - NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pregister2"); - - /* Check the existence of the first property */ - ret = H5Pexist(cid1, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the size of the first property */ - ret = H5Pget_size(cid1, PROP1_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP1_SIZE, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 1, "H5Pget_nprops"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Try to insert the second property again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, - NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pregister2"); - - /* Check the existence of the second property */ - ret = H5Pexist(cid1, PROP2_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the size of the second property */ - ret = H5Pget_size(cid1, PROP2_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP2_SIZE, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Insert third property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Check the existence of the third property */ - ret = H5Pexist(cid1, PROP3_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the size of the third property */ - ret = H5Pget_size(cid1, PROP3_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP3_SIZE, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Unregister first property */ - ret = H5Punregister(cid1, PROP1_NAME); - CHECK_I(ret, "H5Punregister"); - - /* Try to check the size of the first 
property (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pget_size(cid1, PROP1_NAME, &size); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Unregister second property */ - ret = H5Punregister(cid1, PROP2_NAME); - CHECK_I(ret, "H5Punregister"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 1, "H5Pget_nprops"); - - /* Unregister third property */ - ret = H5Punregister(cid1, PROP3_NAME); - CHECK_I(ret, "H5Punregister"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 0, "H5Pget_nprops"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); -} /* end test_genprop_basic_class_prop() */ - -/**************************************************************** -** -** test_genprop_iter1(): Property iterator for test_genprop_class_iter -** -****************************************************************/ -static int -test_genprop_iter1(hid_t H5_ATTR_UNUSED id, const char *name, void *iter_data) -{ - iter_data_t *idata = (iter_data_t *)iter_data; - - return strcmp(name, idata->names[idata->iter_count++]); -} - -/**************************************************************** -** -** test_genprop_class_iter(): Test basic generic property list code. -** Tests iterating over properties in a generic class. -** -****************************************************************/ -static void -test_genprop_class_iter(void) -{ - hid_t cid1; /* Generic Property class ID */ - size_t nprops; /* Number of properties in class */ - int idx; /* Index to start iteration at */ - struct { /* Struct for iterations */ - int iter_count; - const char **names; - } iter_struct; - const char *pnames[4] = {/* Names of properties for iterator */ - PROP1_NAME, PROP2_NAME, PROP3_NAME, PROP4_NAME}; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Class Property Iteration Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert third property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert third property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 4, "H5Pget_nprops"); - - /* Iterate over all properties in class */ - iter_struct.iter_count = 0; - iter_struct.names 
= pnames; - ret = H5Piterate(cid1, NULL, test_genprop_iter1, &iter_struct); - VERIFY(ret, 0, "H5Piterate"); - - /* Iterate over last three properties in class */ - idx = iter_struct.iter_count = 1; - ret = H5Piterate(cid1, &idx, test_genprop_iter1, &iter_struct); - VERIFY(ret, 0, "H5Piterate"); - VERIFY(idx, (int)nprops, "H5Piterate"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); -} /* end test_genprop_class_iter() */ - -/**************************************************************** -** -** test_genprop_cls_*_cb1(): Property List callbacks for test_genprop_class_callback -** -****************************************************************/ -static herr_t -test_genprop_cls_crt_cb1(hid_t list_id, void *create_data) -{ - count_data_t *cdata = (count_data_t *)create_data; - - cdata->count++; - cdata->id = list_id; - - return SUCCEED; -} - -static herr_t -test_genprop_cls_cpy_cb1(hid_t new_list_id, hid_t H5_ATTR_UNUSED old_list_id, void *copy_data) -{ - count_data_t *cdata = (count_data_t *)copy_data; - - cdata->count++; - cdata->id = new_list_id; - - return SUCCEED; -} - -static herr_t -test_genprop_cls_cls_cb1(hid_t list_id, void *create_data) -{ - count_data_t *cdata = (count_data_t *)create_data; - - cdata->count++; - cdata->id = list_id; - - return SUCCEED; -} - -/**************************************************************** -** -** test_genprop_class_callback(): Test basic generic property list code. -** Tests callbacks for property lists in a generic class. -** -****************************************************************/ -static void -test_genprop_class_callback(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t cid2; /* Generic Property class ID */ - hid_t lid1; /* Generic Property list ID */ - hid_t lid2; /* Generic Property list ID */ - hid_t lid3; /* Generic Property list ID */ - size_t nprops; /* Number of properties in class */ - struct { /* Struct for callbacks */ - int count; - hid_t id; - } crt_cb_struct, cpy_cb_struct, cls_cb_struct; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Class Callback Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = - H5Pcreate_class(H5P_ROOT, CLASS1_NAME, test_genprop_cls_crt_cb1, &crt_cb_struct, - test_genprop_cls_cpy_cb1, &cpy_cb_struct, test_genprop_cls_cls_cb1, &cls_cb_struct); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert third property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Initialize class callback structs */ - crt_cb_struct.count = 0; - crt_cb_struct.id = (-1); - cpy_cb_struct.count = 0; - cpy_cb_struct.id = (-1); - cls_cb_struct.count = 0; - cls_cb_struct.id = (-1); - - /* Create a property list from the 
class */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* Verify that the creation callback occurred */ - VERIFY(crt_cb_struct.count, 1, "H5Pcreate"); - VERIFY(crt_cb_struct.id, lid1, "H5Pcreate"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Create another property list from the class */ - lid2 = H5Pcreate(cid1); - CHECK_I(lid2, "H5Pcreate"); - - /* Verify that the creation callback occurred */ - VERIFY(crt_cb_struct.count, 2, "H5Pcreate"); - VERIFY(crt_cb_struct.id, lid2, "H5Pcreate"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid2, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Create another property list by copying an existing list */ - lid3 = H5Pcopy(lid1); - CHECK_I(lid3, "H5Pcopy"); - - /* Verify that the copy callback occurred */ - VERIFY(cpy_cb_struct.count, 1, "H5Pcopy"); - VERIFY(cpy_cb_struct.id, lid3, "H5Pcopy"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid3, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Close first list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Verify that the close callback occurred */ - VERIFY(cls_cb_struct.count, 1, "H5Pclose"); - VERIFY(cls_cb_struct.id, lid1, "H5Pclose"); - - /* Close second list */ - ret = H5Pclose(lid2); - CHECK_I(ret, "H5Pclose"); - - /* Verify that the close callback occurred */ - VERIFY(cls_cb_struct.count, 2, "H5Pclose"); - VERIFY(cls_cb_struct.id, lid2, "H5Pclose"); - - /* Close third list */ - ret = H5Pclose(lid3); - CHECK_I(ret, "H5Pclose"); - - /* Verify that the close callback occurred */ - VERIFY(cls_cb_struct.count, 3, "H5Pclose"); - VERIFY(cls_cb_struct.id, lid3, "H5Pclose"); - - /* Create another new generic class, derived from first class */ - cid2 = - H5Pcreate_class(cid1, CLASS2_NAME, test_genprop_cls_crt_cb1, &crt_cb_struct, test_genprop_cls_cpy_cb1, - &cpy_cb_struct, test_genprop_cls_cls_cb1, &cls_cb_struct); - CHECK_I(cid2, "H5Pcreate_class"); - - /* Insert fourth property into class (with no callbacks) */ - ret = - H5Pregister2(cid2, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Check the number of properties in class */ - /* (only reports the number of properties in 2nd class) */ - ret = H5Pget_nprops(cid2, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 1, "H5Pget_nprops"); - - /* Create a property list from the 2nd class */ - lid1 = H5Pcreate(cid2); - CHECK_I(lid1, "H5Pcreate"); - - /* Verify that both of the creation callbacks occurred */ - VERIFY(crt_cb_struct.count, 4, "H5Pcreate"); - VERIFY(crt_cb_struct.id, lid1, "H5Pcreate"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 4, "H5Pget_nprops"); - - /* Create another property list by copying existing list */ - lid2 = H5Pcopy(lid1); - CHECK_I(lid2, "H5Pcopy"); - - /* Verify that both of the copy callbacks occurred */ - VERIFY(cpy_cb_struct.count, 3, "H5Pcopy"); - VERIFY(cpy_cb_struct.id, lid2, "H5Pcopy"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid2, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 4, "H5Pget_nprops"); - - /* Close first list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Verify that both of the close callbacks occurred 
*/ - VERIFY(cls_cb_struct.count, 5, "H5Pclose"); - VERIFY(cls_cb_struct.id, lid1, "H5Pclose"); - - /* Close second list */ - ret = H5Pclose(lid2); - CHECK_I(ret, "H5Pclose"); - - /* Verify that both of the close callbacks occurred */ - VERIFY(cls_cb_struct.count, 7, "H5Pclose"); - VERIFY(cls_cb_struct.id, lid2, "H5Pclose"); - - /* Close classes */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - ret = H5Pclose_class(cid2); - CHECK_I(ret, "H5Pclose_class"); -} /* end test_genprop_class_callback() */ - -/**************************************************************** -** -** test_genprop_basic_list(): Test basic generic property list code. -** Tests creating new generic property lists. -** -****************************************************************/ -static void -test_genprop_basic_list(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t cid2; /* Generic Property class ID */ - hid_t lid1; /* Generic Property list ID */ - size_t nprops; /* Number of properties */ - size_t size; /* Size of property */ - int prop1_value; /* Value for property #1 */ - float prop2_value; /* Value for property #2 */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Creation Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Add several properties (w/default values) */ - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Create a property list from the class */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* Get the list's class */ - cid2 = H5Pget_class(lid1); - CHECK_I(cid2, "H5Pget_class"); - - /* Check that the list's class is correct */ - ret = H5Pequal(cid1, cid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Check correct "is a" class/list relationship */ - ret = H5Pisa_class(lid1, cid1); - VERIFY(ret, 1, "H5Pisa_class"); - - /* Check "is a" class/list relationship another way */ - ret = H5Pisa_class(lid1, cid2); - VERIFY(ret, 1, "H5Pisa_class"); - - /* Close class */ - ret = H5Pclose_class(cid2); - CHECK_I(ret, "H5Pclose_class"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Check existence of properties */ - ret = H5Pexist(lid1, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - ret = H5Pexist(lid1, PROP2_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the sizes of the properties */ - ret = H5Pget_size(lid1, PROP1_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP1_SIZE, "H5Pget_size"); - ret = H5Pget_size(lid1, PROP2_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP2_SIZE, "H5Pget_size"); - - /* Check values of properties (set with default values) */ - ret = H5Pget(lid1, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - 
VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - ret = H5Pget(lid1, PROP2_NAME, &prop2_value); - CHECK_I(ret, "H5Pget"); - /* Verify the floating-poing value in this way to avoid compiler warning. */ - if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", - (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__); - - /* Close list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - -} /* end test_genprop_basic_list() */ - -/**************************************************************** -** -** test_genprop_basic_list_prop(): Test basic generic property list code. -** Tests creating new generic property lists and adding and -** removing properties from them. -** -****************************************************************/ -static void -test_genprop_basic_list_prop(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t lid1; /* Generic Property list ID */ - size_t nprops; /* Number of properties */ - int prop1_value; /* Value for property #1 */ - float prop2_value; /* Value for property #2 */ - char prop3_value[10]; /* Property #3 value */ - double prop4_value; /* Property #4 value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Property Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Add several properties (several w/default values) */ - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Create a property list from the class */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Add temporary properties */ - - /* Insert first temporary property into list (with no callbacks) */ - ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Insert second temporary property into list (with no callbacks) */ - ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 4, "H5Pget_nprops"); - - /* Check existence of all properties */ - ret = H5Pexist(lid1, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - ret = H5Pexist(lid1, PROP2_NAME); - VERIFY(ret, 1, "H5Pexist"); - ret = H5Pexist(lid1, PROP3_NAME); - VERIFY(ret, 1, "H5Pexist"); - ret = H5Pexist(lid1, PROP4_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of permanent properties (set with default values) */ - ret = H5Pget(lid1, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - ret = 
H5Pget(lid1, PROP2_NAME, &prop2_value); - CHECK_I(ret, "H5Pget"); - /* Verify the floating-poing value in this way to avoid compiler warning. */ - if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", - (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__); - - /* Check values of temporary properties (set with regular values) */ - ret = H5Pget(lid1, PROP3_NAME, &prop3_value); - CHECK_I(ret, "H5Pget"); - if (memcmp(&prop3_value, PROP3_DEF_VALUE, PROP3_SIZE) != 0) - TestErrPrintf("Property #3 doesn't match!, line=%d\n", __LINE__); - ret = H5Pget(lid1, PROP4_NAME, &prop4_value); - CHECK_I(ret, "H5Pget"); - /* Verify the floating-poing value in this way to avoid compiler warning. */ - if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", - *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__); - - /* Delete permanent property */ - ret = H5Premove(lid1, PROP2_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check number of properties */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Delete temporary property */ - ret = H5Premove(lid1, PROP3_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check number of properties */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Check existence of remaining properties */ - ret = H5Pexist(lid1, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - ret = H5Pexist(lid1, PROP4_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of permanent properties (set with default values) */ - ret = H5Pget(lid1, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Check values of temporary properties (set with regular values) */ - ret = H5Pget(lid1, PROP4_NAME, &prop4_value); - CHECK_I(ret, "H5Pget"); - /* Verify the floating-poing value in this way to avoid compiler warning. */ - if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", - *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__); - - /* Close list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - -} /* end test_genprop_basic_list_prop() */ - -/**************************************************************** -** -** test_genprop_iter2(): Property iterator for test_genprop_list_iter -** -****************************************************************/ -static int -test_genprop_iter2(hid_t H5_ATTR_UNUSED id, const char *name, void *iter_data) -{ - iter_data_t *idata = (iter_data_t *)iter_data; - - return strcmp(name, idata->names[idata->iter_count++]); -} - -/**************************************************************** -** -** test_genprop_list_iter(): Test basic generic property list code. -** Tests iterating over generic property list properties. 
-** -****************************************************************/ -static void -test_genprop_list_iter(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t lid1; /* Generic Property list ID */ - size_t nprops; /* Number of properties */ - int idx; /* Index to start iteration at */ - struct { /* Struct for iterations */ - int iter_count; - const char **names; - } iter_struct; - const char *pnames[4] = {/* Names of properties for iterator */ - PROP3_NAME, PROP4_NAME, PROP1_NAME, PROP2_NAME}; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Generic Property List Iteration Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Add several properties (several w/default values) */ - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Create a property list from the class */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Add temporary properties */ - - /* Insert first temporary property into class (with no callbacks) */ - ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Insert second temporary property into class (with no callbacks) */ - ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check the number of properties in list */ - ret = H5Pget_nprops(lid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 4, "H5Pget_nprops"); - - /* Iterate over all properties in list */ - iter_struct.iter_count = 0; - iter_struct.names = pnames; - ret = H5Piterate(lid1, NULL, test_genprop_iter2, &iter_struct); - VERIFY(ret, 0, "H5Piterate"); - - /* Iterate over last three properties in list */ - idx = iter_struct.iter_count = 1; - ret = H5Piterate(lid1, &idx, test_genprop_iter2, &iter_struct); - VERIFY(ret, 0, "H5Piterate"); - VERIFY(idx, (int)nprops, "H5Piterate"); - - /* Close list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - -} /* end test_genprop_list_iter() */ - -typedef struct { - /* Creation information */ - int crt_count; - char *crt_name; - void *crt_value; - - /* Set information */ - int set_count; - hid_t set_plist_id; - char *set_name; - void *set_value; - - /* Get information */ - int get_count; - hid_t get_plist_id; - char *get_name; - void *get_value; - - /* Delete information */ - int del_count; - hid_t del_plist_id; - char *del_name; - void *del_value; - - /* Copy information */ - int cop_count; - char *cop_name; - void *cop_value; - - /* Compare information */ - int cmp_count; - - /* Close information */ - int cls_count; - char *cls_name; - void *cls_value; -} prop_cb_info; - -/* Global variables for Callback information */ 
-prop_cb_info prop1_cb_info; /* Callback statistics for property #1 */ -prop_cb_info prop2_cb_info; /* Callback statistics for property #2 */ -prop_cb_info prop3_cb_info; /* Callback statistics for property #3 */ - -/**************************************************************** -** -** test_genprop_cls_cpy_cb2(): Property Class callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_cls_cpy_cb2(hid_t new_list_id, hid_t H5_ATTR_UNUSED old_list_id, void *create_data) -{ - count_data_t *cdata = (count_data_t *)create_data; - - cdata->count++; - cdata->id = new_list_id; - - return SUCCEED; -} - -/**************************************************************** -** -** test_genprop_prop_crt_cb1(): Property creation callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_prop_crt_cb1(const char *name, size_t size, void *def_value) -{ - /* Set the information from the creation call */ - prop1_cb_info.crt_count++; - prop1_cb_info.crt_name = strdup(name); - prop1_cb_info.crt_value = malloc(size); - memcpy(prop1_cb_info.crt_value, def_value, size); - - return (SUCCEED); -} - -/**************************************************************** -** -** test_genprop_prop_set_cb1(): Property set callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_prop_set_cb1(hid_t plist_id, const char *name, size_t size, void *value) -{ - /* Set the information from the set call */ - prop1_cb_info.set_count++; - prop1_cb_info.set_plist_id = plist_id; - if (prop1_cb_info.set_name == NULL) - prop1_cb_info.set_name = strdup(name); - if (prop1_cb_info.set_value == NULL) - prop1_cb_info.set_value = malloc(size); - memcpy(prop1_cb_info.set_value, value, size); - - return (SUCCEED); -} - -/**************************************************************** -** -** test_genprop_prop_get_cb1(): Property get callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_prop_get_cb1(hid_t plist_id, const char *name, size_t size, void *value) -{ - /* Set the information from the get call */ - prop1_cb_info.get_count++; - prop1_cb_info.get_plist_id = plist_id; - if (prop1_cb_info.get_name == NULL) - prop1_cb_info.get_name = strdup(name); - if (prop1_cb_info.get_value == NULL) - prop1_cb_info.get_value = malloc(size); - memcpy(prop1_cb_info.get_value, value, size); - - return (SUCCEED); -} - -/**************************************************************** -** -** test_genprop_prop_cop_cb1(): Property copy callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_prop_cop_cb1(const char *name, size_t size, void *value) -{ - /* Set the information from the get call */ - prop1_cb_info.cop_count++; - if (prop1_cb_info.cop_name == NULL) - prop1_cb_info.cop_name = strdup(name); - if (prop1_cb_info.cop_value == NULL) - prop1_cb_info.cop_value = malloc(size); - memcpy(prop1_cb_info.cop_value, value, size); - - return (SUCCEED); -} - -/**************************************************************** -** -** test_genprop_prop_cmp_cb1(): Property comparison callback for test_genprop_list_callback -** -****************************************************************/ -static int -test_genprop_prop_cmp_cb1(const void *value1, const void 
*value2, size_t size) -{ - /* Set the information from the comparison call */ - prop1_cb_info.cmp_count++; - - return (memcmp(value1, value2, size)); -} - -/**************************************************************** -** -** test_genprop_prop_cmp_cb3(): Property comparison callback for test_genprop_list_callback -** -****************************************************************/ -static int -test_genprop_prop_cmp_cb3(const void *value1, const void *value2, size_t size) -{ - /* Set the information from the comparison call */ - prop3_cb_info.cmp_count++; - - return (memcmp(value1, value2, size)); -} - -/**************************************************************** -** -** test_genprop_prop_cls_cb1(): Property close callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_prop_cls_cb1(const char *name, size_t size, void *value) -{ - /* Set the information from the close call */ - prop1_cb_info.cls_count++; - if (prop1_cb_info.cls_name == NULL) - prop1_cb_info.cls_name = strdup(name); - if (prop1_cb_info.cls_value == NULL) - prop1_cb_info.cls_value = malloc(size); - memcpy(prop1_cb_info.cls_value, value, size); - - return (SUCCEED); -} - -/**************************************************************** -** -** test_genprop_prop_del_cb2(): Property delete callback for test_genprop_list_callback -** -****************************************************************/ -static herr_t -test_genprop_prop_del_cb2(hid_t plist_id, const char *name, size_t size, void *value) -{ - /* Set the information from the delete call */ - prop2_cb_info.del_count++; - prop2_cb_info.del_plist_id = plist_id; - prop2_cb_info.del_name = strdup(name); - prop2_cb_info.del_value = malloc(size); - memcpy(prop2_cb_info.del_value, value, size); - - return (SUCCEED); -} - -/**************************************************************** -** -** test_genprop_list_callback(): Test basic generic property list code. -** Tests callbacks for properties in a generic property list. 
-** -****************************************************************/ -static void -test_genprop_list_callback(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t lid1; /* Generic Property list ID */ - hid_t lid2; /* 2nd Generic Property list ID */ - size_t nprops; /* Number of properties in class */ - int prop1_value; /* Value for property #1 */ - int prop1_new_value = 20; /* Property #1 new value */ - float prop2_value; /* Value for property #2 */ - char prop3_value[10]; /* Property #3 value */ - char prop3_new_value[10] = "10 chairs"; /* Property #3 new value */ - double prop4_value; /* Property #4 value */ - struct { /* Struct for callbacks */ - int count; - hid_t id; - } cop_cb_struct; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Property Callback Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, test_genprop_cls_cpy_cb2, &cop_cb_struct, NULL, - NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Insert first property into class (with callbacks) */ - ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, test_genprop_prop_crt_cb1, - test_genprop_prop_set_cb1, test_genprop_prop_get_cb1, NULL, test_genprop_prop_cop_cb1, - test_genprop_prop_cmp_cb1, test_genprop_prop_cls_cb1); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with only delete callback) */ - ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, - test_genprop_prop_del_cb2, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert third property into class (with only compare callback) */ - ret = H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, - test_genprop_prop_cmp_cb3, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert fourth property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 4, "H5Pget_nprops"); - - /* Initialize class callback structs */ - cop_cb_struct.count = 0; - cop_cb_struct.id = (-1); - - /* Initialize callback information for properties tracked */ - memset(&prop1_cb_info, 0, sizeof(prop_cb_info)); - memset(&prop2_cb_info, 0, sizeof(prop_cb_info)); - memset(&prop3_cb_info, 0, sizeof(prop_cb_info)); - - /* Create a property list from the class */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* The compare callback should not have been called once on property 1, as - * the property is always copied */ - VERIFY(prop1_cb_info.cmp_count, 0, "H5Pcreate"); - /* The compare callback should not have been called on property 3, as there - * is no create callback */ - VERIFY(prop3_cb_info.cmp_count, 0, "H5Pcreate"); - - /* Verify creation callback information for properties tracked */ - VERIFY(prop1_cb_info.crt_count, 1, "H5Pcreate"); - if (strcmp(prop1_cb_info.crt_name, PROP1_NAME) != 0) - TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop1_cb_info.crt_value, PROP1_DEF_VALUE, PROP1_SIZE) != 0) - TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); - - /* Check values of permanent properties (set with default values) */ - ret = H5Pget(lid1, PROP1_NAME, 
&prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - /* The compare callback should not have been called */ - VERIFY(prop1_cb_info.cmp_count, 0, "H5Pget"); - ret = H5Pget(lid1, PROP2_NAME, &prop2_value); - CHECK_I(ret, "H5Pget"); - /* Verify the floating-poing value in this way to avoid compiler warning. */ - if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", - (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__); - - /* Check values of temporary properties (set with regular values) */ - ret = H5Pget(lid1, PROP3_NAME, &prop3_value); - CHECK_I(ret, "H5Pget"); - if (memcmp(&prop3_value, PROP3_DEF_VALUE, PROP3_SIZE) != 0) - TestErrPrintf("Property #3 doesn't match!, line=%d\n", __LINE__); - /* The compare callback should not have been called, as there is no get - * callback for this property */ - VERIFY(prop3_cb_info.cmp_count, 0, "H5Pget"); - ret = H5Pget(lid1, PROP4_NAME, &prop4_value); - CHECK_I(ret, "H5Pget"); - /* Verify the floating-poing value in this way to avoid compiler warning. */ - if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE)) - printf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget", - *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__); - - /* Verify get callback information for properties tracked */ - VERIFY(prop1_cb_info.get_count, 1, "H5Pget"); - VERIFY(prop1_cb_info.get_plist_id, lid1, "H5Pget"); - if (strcmp(prop1_cb_info.get_name, PROP1_NAME) != 0) - TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop1_cb_info.get_value, PROP1_DEF_VALUE, PROP1_SIZE) != 0) - TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); - - /* Set value of property #1 to different value */ - ret = H5Pset(lid1, PROP1_NAME, &prop1_new_value); - CHECK_I(ret, "H5Pset"); - - /* Verify set callback information for properties tracked */ - VERIFY(prop1_cb_info.set_count, 1, "H5Pset"); - VERIFY(prop1_cb_info.set_plist_id, lid1, "H5Pset"); - if (strcmp(prop1_cb_info.set_name, PROP1_NAME) != 0) - TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop1_cb_info.set_value, &prop1_new_value, PROP1_SIZE) != 0) - TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); - - /* The compare callback should not have been called */ - VERIFY(prop1_cb_info.cmp_count, 0, "H5Pset"); - - /* Set value of property #3 to different value */ - ret = H5Pset(lid1, PROP3_NAME, prop3_new_value); - CHECK_I(ret, "H5Pset"); - - /* The compare callback should not have been called */ - VERIFY(prop3_cb_info.cmp_count, 0, "H5Pset"); - - /* Check new value of tracked properties */ - ret = H5Pget(lid1, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, prop1_new_value, "H5Pget"); - - /* Verify get callback information again for properties tracked */ - VERIFY(prop1_cb_info.get_count, 2, "H5Pget"); - VERIFY(prop1_cb_info.get_plist_id, lid1, "H5Pget"); - if (strcmp(prop1_cb_info.get_name, PROP1_NAME) != 0) - TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop1_cb_info.get_value, &prop1_new_value, PROP1_SIZE) != 0) - TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); - - /* Delete property #2 */ - ret = H5Premove(lid1, PROP2_NAME); - CHECK_I(ret, "H5Premove"); - - /* Verify delete callback information for properties tracked */ - VERIFY(prop2_cb_info.del_count, 1, 
"H5Premove"); - VERIFY(prop2_cb_info.del_plist_id, lid1, "H5Premove"); - if (strcmp(prop2_cb_info.del_name, PROP2_NAME) != 0) - TestErrPrintf("Property #2 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop2_cb_info.del_value, PROP2_DEF_VALUE, PROP2_SIZE) != 0) - TestErrPrintf("Property #2 value doesn't match!, line=%d\n", __LINE__); - - /* Copy first list */ - lid2 = H5Pcopy(lid1); - CHECK_I(lid2, "H5Pcopy"); - - /* Verify copy callback information for properties tracked */ - VERIFY(prop1_cb_info.cop_count, 1, "H5Pcopy"); - if (strcmp(prop1_cb_info.cop_name, PROP1_NAME) != 0) - TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop1_cb_info.cop_value, &prop1_new_value, PROP1_SIZE) != 0) - TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); - - /* Verify that the class creation callback occurred */ - VERIFY(cop_cb_struct.count, 1, "H5Pcopy"); - VERIFY(cop_cb_struct.id, lid2, "H5Pcopy"); - - /* Compare the two lists */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Verify compare callback information for properties tracked */ - VERIFY(prop1_cb_info.cmp_count, 1, "H5Pequal"); - VERIFY(prop3_cb_info.cmp_count, 1, "H5Pequal"); - - /* Close first list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Verify close callback information for properties tracked */ - VERIFY(prop1_cb_info.cls_count, 1, "H5Pclose"); - if (strcmp(prop1_cb_info.cls_name, PROP1_NAME) != 0) - TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__); - if (memcmp(prop1_cb_info.cls_value, &prop1_new_value, PROP1_SIZE) != 0) - TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__); - - /* Close second list */ - ret = H5Pclose(lid2); - CHECK_I(ret, "H5Pclose"); - - /* Verify close callback information for properties tracked */ - VERIFY(prop1_cb_info.cls_count, 2, "H5Pclose"); - - /* Free memory allocated for tracking properties */ - free(prop1_cb_info.crt_name); - free(prop1_cb_info.crt_value); - free(prop1_cb_info.get_name); - free(prop1_cb_info.get_value); - free(prop1_cb_info.set_name); - free(prop1_cb_info.set_value); - free(prop1_cb_info.cop_name); - free(prop1_cb_info.cop_value); - free(prop1_cb_info.cls_name); - free(prop1_cb_info.cls_value); - free(prop2_cb_info.del_name); - free(prop2_cb_info.del_value); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); -} /* end test_genprop_list_callback() */ - -/**************************************************************** -** -** test_genprop_list_addprop(): Test adding properties to a -** standard HDF5 property list and verify that the library -** ignores the extra properties. 
-** -****************************************************************/ -static void -test_genprop_list_addprop(void) -{ - hid_t fid; /* File ID */ - hid_t did; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t pid; /* Property List ID */ - int prop1_value; /* Value for property #1 */ - herr_t ret; /* Generic return value */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create scalar dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset creation property list */ - pid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(pid, FAIL, "H5Pcreate"); - - /* Insert temporary property into class (with no callbacks) */ - ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check existence of added property */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of property (set with default value) */ - ret = H5Pget(pid, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Create a dataset */ - did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check existence of added property (after using property list) */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of property (set with default value) (after using property list) */ - ret = H5Pget(pid, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Close property list */ - ret = H5Pclose(pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_genprop_list_addprop() */ - -/**************************************************************** -** -** test_genprop_class_addprop(): Test adding properties to a -** standard HDF5 property class and verify that the library -** ignores the extra properties and continues to recognize the -** derived class as a valid version of the derived-from class. 
-** -****************************************************************/ -static void -test_genprop_class_addprop(void) -{ - hid_t fid; /* File ID */ - hid_t did; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t cid; /* Property Class ID */ - hid_t pid; /* Property List ID */ - int prop1_value; /* Value for property #1 */ - herr_t ret; /* Generic return value */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create scalar dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a new class, derived from the dataset creation property list class */ - cid = H5Pcreate_class(H5P_DATASET_CREATE, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid, "H5Pcreate_class"); -#if 0 - /* Check existence of an original property */ - ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME); - VERIFY(ret, 1, "H5Pexist"); -#endif - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); -#if 0 - /* Check existence of an original property */ - ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME); - VERIFY(ret, 1, "H5Pexist"); -#endif - /* Check existence of added property */ - ret = H5Pexist(cid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Create a derived dataset creation property list */ - pid = H5Pcreate(cid); - CHECK(pid, FAIL, "H5Pcreate"); -#if 0 - /* Check existence of an original property */ - ret = H5Pexist(pid, H5O_CRT_PIPELINE_NAME); - VERIFY(ret, 1, "H5Pexist"); -#endif - /* Check existence of added property */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of property (set with default value) */ - ret = H5Pget(pid, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); -#if 0 - /* Check existence of an original property (in class) */ - ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME); - VERIFY(ret, 1, "H5Pexist"); -#endif - /* Check existence of first added property (in class) */ - ret = H5Pexist(cid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check existence of second added property (in class) */ - ret = H5Pexist(cid, PROP2_NAME); - VERIFY(ret, 1, "H5Pexist"); -#if 0 - /* Check existence of an original property (in property list) */ - ret = H5Pexist(pid, H5O_CRT_PIPELINE_NAME); - VERIFY(ret, 1, "H5Pexist"); -#endif - /* Check existence of first added property (in property list) */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check existence of second added property (in property list) (should not exist) */ - ret = H5Pexist(pid, PROP2_NAME); - VERIFY(ret, 0, "H5Pexist"); - - /* Create a dataset */ - did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check existence of added property (after using property list) */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of property (set with default value) (after using property list) */ - ret = H5Pget(pid, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Close property class */ - ret = 
H5Pclose_class(cid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close property list */ - ret = H5Pclose(pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_genprop_class_addprop() */ - -/**************************************************************** -** -** test_genprop_list_add_remove_prop(): Test adding then removing the -** same properties to a standard HDF5 property list. This is testing -** also for a memory leak that could be caused by not freeing the -** removed property resources from the property list. -** -****************************************************************/ -static void -test_genprop_list_add_remove_prop(void) -{ - hid_t pid; /* Property List ID */ - herr_t ret; /* Generic return value */ - - /* Create a dataset creation property list */ - pid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(pid, FAIL, "H5Pcreate"); - - /* Insert temporary property into class (with no callbacks) */ - ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Delete added property */ - ret = H5Premove(pid, PROP1_NAME); - CHECK_I(ret, "H5Premove"); - - /* Insert temporary property into class (with no callbacks) */ - ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Delete added property */ - ret = H5Premove(pid, PROP1_NAME); - CHECK_I(ret, "H5Premove"); - - /* Close property list */ - ret = H5Pclose(pid); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_genprop_list_add_remove_prop() */ - -/**************************************************************** -** -** test_genprop_equal(): Test basic generic property list code. 
-** More tests for H5Pequal() -** -****************************************************************/ -static void -test_genprop_equal(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t lid1; /* Generic Property list ID */ - hid_t lid2; /* Generic Property list ID */ - int prop1_new_value = 20; /* Property #1 new value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Generic Property List Equal Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Create a property list from the class */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* Copy the property list */ - lid2 = H5Pcopy(lid1); - CHECK_I(lid2, "H5Pcopy"); - - /* Check that the lists are equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Set property in first list to another value */ - ret = H5Pset(lid1, PROP1_NAME, &prop1_new_value); - CHECK_I(ret, "H5Pset"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Set property in first list back to default */ - ret = H5Pset(lid1, PROP1_NAME, PROP1_DEF_VALUE); - CHECK_I(ret, "H5Pset"); - - /* Check that the lists are still equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Insert first temporary property into first list (with no callbacks) */ - ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Insert first temporary property into second list (with no callbacks) */ - ret = H5Pinsert2(lid2, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check that the lists are equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Insert second temporary property into second list (with no callbacks) */ - ret = H5Pinsert2(lid2, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Insert second temporary property into first list (with no callbacks) */ - ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert2"); - - /* Check that the lists are equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Remove first temporary property from first list */ - ret = H5Premove(lid1, PROP3_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Remove second temporary property from second list */ - ret = H5Premove(lid2, PROP4_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check that the lists are not equal */ - 
ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Remove first temporary property from second list */ - ret = H5Premove(lid2, PROP3_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Remove first permanent property from first list */ - ret = H5Premove(lid1, PROP1_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Remove second temporary property from first list */ - ret = H5Premove(lid1, PROP4_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check that the lists are not equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 0, "H5Pequal"); - - /* Remove first permanent property from second list */ - ret = H5Premove(lid2, PROP1_NAME); - CHECK_I(ret, "H5Premove"); - - /* Check that the lists are equal */ - ret = H5Pequal(lid1, lid2); - VERIFY(ret, 1, "H5Pequal"); - - /* Close property lists */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - ret = H5Pclose(lid2); - CHECK_I(ret, "H5Pclose"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); -} /* ent test_genprop_equal() */ - -/**************************************************************** -** -** test_genprop_path(): Test basic generic property list code. -** Tests for class paths -** -****************************************************************/ -static void -test_genprop_path(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t cid2; /* Generic Property class ID */ -#if 0 - hid_t cid3; /* Generic Property class ID */ - char *path; /* Class path */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Generic Property List Class Path Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); -#if 0 - /* Get full path for first class */ - path = H5P__get_class_path_test(cid1); - CHECK_PTR(path, "H5P__get_class_path_test"); - if (strcmp(path, CLASS1_PATH) != 0) - TestErrPrintf("Class names don't match!, path=%s, CLASS1_PATH=%s\n", path, CLASS1_PATH); - H5free_memory(path); -#endif - /* Create another new generic class, derived from first class */ - cid2 = H5Pcreate_class(cid1, CLASS2_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid2, "H5Pcreate_class"); - - /* Insert second property into class (with no callbacks) */ - ret = - H5Pregister2(cid2, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); -#if 0 - /* Get full path for second class */ - path = H5P__get_class_path_test(cid2); - CHECK_PTR(path, "H5P__get_class_path_test"); - if (strcmp(path, CLASS2_PATH) != 0) - TestErrPrintf("Class names don't match!, path=%s, CLASS2_PATH=%s\n", path, CLASS2_PATH); - - /* Open a copy of the class with the path name */ - cid3 = H5P__open_class_path_test(path); - CHECK_I(cid3, "H5P__open_class_path_test"); - - /* Check that the classes are equal */ - ret = H5Pequal(cid2, cid3); - VERIFY(ret, 1, "H5Pequal"); - - /* Release the path string */ - H5free_memory(path); - - /* Close class */ - ret = 
H5Pclose_class(cid3); - CHECK_I(ret, "H5Pclose_class"); -#endif - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - - /* Close class */ - ret = H5Pclose_class(cid2); - CHECK_I(ret, "H5Pclose_class"); - -} /* ent test_genprop_path() */ - -/**************************************************************** -** -** test_genprop_refcount(): Test basic generic property list code. -** Tests for correct reference counting -** -****************************************************************/ -static void -test_genprop_refcount(void) -{ - hid_t cid1; /* Generic Property class ID */ - hid_t lid1; /* Generic Property class ID */ - char *name; /* Name of class */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Generic Property List Reference Count Functionality\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Insert first property into class (with no callbacks) */ - ret = - H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister2"); - - /* Create a new generic list, derived from the root of the class hierarchy */ - lid1 = H5Pcreate(cid1); - CHECK_I(lid1, "H5Pcreate"); - - /* Check class name */ - name = H5Pget_class_name(cid1); - CHECK_PTR(name, "H5Pget_class_name"); - if (strcmp(name, CLASS1_NAME) != 0) - TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); - H5free_memory(name); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - - /* Get the list's class */ - cid1 = H5Pget_class(lid1); - CHECK_I(cid1, "H5Pget_class"); - - /* Check correct "is a" class/list relationship */ - ret = H5Pisa_class(lid1, cid1); - VERIFY(ret, 1, "H5Pisa_class"); - - /* Check class name */ - name = H5Pget_class_name(cid1); - CHECK_PTR(name, "H5Pget_class_name"); - if (strcmp(name, CLASS1_NAME) != 0) - TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); - H5free_memory(name); - - /* Close list */ - ret = H5Pclose(lid1); - CHECK_I(ret, "H5Pclose"); - - /* Check class name */ - name = H5Pget_class_name(cid1); - CHECK_PTR(name, "H5Pget_class_name"); - if (strcmp(name, CLASS1_NAME) != 0) - TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME); - H5free_memory(name); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); - -} /* ent test_genprop_refcount() */ - -#ifndef H5_NO_DEPRECATED_SYMBOLS -/**************************************************************** -** -** test_genprop_deprec_class(): Test basic generic property list code. -** Tests deprecated property class API routines. 
-** -****************************************************************/ -static void -test_genprop_deprec_class(void) -{ - hid_t cid1; /* Generic Property class ID */ - size_t size; /* Size of property */ - size_t nprops; /* Number of properties in class */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deprecated Generic Property List Functions\n")); - - /* Create a new generic class, derived from the root of the class hierarchy */ - cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(cid1, "H5Pcreate_class"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 0, "H5Pget_nprops"); - - /* Check the existence of the first property (should fail) */ - ret = H5Pexist(cid1, PROP1_NAME); - VERIFY(ret, 0, "H5Pexist"); - - /* Insert first property into class (with no callbacks) */ - ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister1"); - - /* Try to insert the first property again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pregister1"); - - /* Check the existence of the first property */ - ret = H5Pexist(cid1, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the size of the first property */ - ret = H5Pget_size(cid1, PROP1_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP1_SIZE, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 1, "H5Pget_nprops"); - - /* Insert second property into class (with no callbacks) */ - ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister1"); - - /* Try to insert the second property again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pregister1"); - - /* Check the existence of the second property */ - ret = H5Pexist(cid1, PROP2_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the size of the second property */ - ret = H5Pget_size(cid1, PROP2_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP2_SIZE, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Insert third property into class (with no callbacks) */ - ret = H5Pregister1(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pregister1"); - - /* Check the existence of the third property */ - ret = H5Pexist(cid1, PROP3_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check the size of the third property */ - ret = H5Pget_size(cid1, PROP3_NAME, &size); - CHECK_I(ret, "H5Pget_size"); - VERIFY(size, PROP3_SIZE, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 3, "H5Pget_nprops"); - - /* Unregister first property */ - ret = H5Punregister(cid1, PROP1_NAME); - CHECK_I(ret, "H5Punregister"); - - /* Try to check the size of the first property (should fail) */ - H5E_BEGIN_TRY - { - ret = 
H5Pget_size(cid1, PROP1_NAME, &size); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pget_size"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 2, "H5Pget_nprops"); - - /* Unregister second property */ - ret = H5Punregister(cid1, PROP2_NAME); - CHECK_I(ret, "H5Punregister"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 1, "H5Pget_nprops"); - - /* Unregister third property */ - ret = H5Punregister(cid1, PROP3_NAME); - CHECK_I(ret, "H5Punregister"); - - /* Check the number of properties in class */ - ret = H5Pget_nprops(cid1, &nprops); - CHECK_I(ret, "H5Pget_nprops"); - VERIFY(nprops, 0, "H5Pget_nprops"); - - /* Close class */ - ret = H5Pclose_class(cid1); - CHECK_I(ret, "H5Pclose_class"); -} /* end test_genprop_deprec_class() */ - -/**************************************************************** -** -** test_genprop_deprec2(): Test basic generic property list code. -** Tests deprecated property list API routines. -** -****************************************************************/ -static void -test_genprop_deprec_list(void) -{ - hid_t fid; /* File ID */ - hid_t did; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t pid; /* Property List ID */ - int prop1_value; /* Value for property #1 */ - herr_t ret; /* Generic return value */ - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create scalar dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset creation property list */ - pid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(pid, FAIL, "H5Pcreate"); - - /* Insert temporary property into class (with no callbacks) */ - ret = H5Pinsert1(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL); - CHECK_I(ret, "H5Pinsert1"); - - /* Check existence of added property */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of property (set with default value) */ - ret = H5Pget(pid, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Create a dataset */ - did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check existence of added property (after using property list) */ - ret = H5Pexist(pid, PROP1_NAME); - VERIFY(ret, 1, "H5Pexist"); - - /* Check values of property (set with default value) (after using property list) */ - ret = H5Pget(pid, PROP1_NAME, &prop1_value); - CHECK_I(ret, "H5Pget"); - VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget"); - - /* Close property list */ - ret = H5Pclose(pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_genprop_deprec_list() */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - -/**************************************************************** -** -** test_genprop(): Main generic property testing routine. -** -****************************************************************/ -void -test_genprop(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing Generic Properties\n")); - - /* These tests use the same file... 
*/ - test_genprop_basic_class(); /* Test basic code for creating a generic class */ - test_genprop_basic_class_prop(); /* Test basic code for adding properties to a generic class */ - test_genprop_class_iter(); /* Test code for iterating over properties in a generic class */ - test_genprop_class_callback(); /* Test code for property class callbacks */ - - test_genprop_basic_list(); /* Test basic code for creating a generic property list */ - test_genprop_basic_list_prop(); /* Test basic code for adding properties to a generic property list */ - test_genprop_list_iter(); /* Test basic code for iterating over properties in a generic property list */ - test_genprop_list_callback(); /* Test code for property list callbacks */ - - test_genprop_list_addprop(); /* Test adding properties to HDF5 property list */ - test_genprop_class_addprop(); /* Test adding properties to HDF5 property class */ - - test_genprop_list_add_remove_prop(); /* Test adding and removing the same property several times to HDF5 - property list */ - - test_genprop_equal(); /* Tests for more H5Pequal verification */ - test_genprop_path(); /* Tests for class path verification */ - test_genprop_refcount(); /* Tests for class reference counting */ - -#ifndef H5_NO_DEPRECATED_SYMBOLS - test_genprop_deprec_class(); /* Tests for deprecated routines */ - test_genprop_deprec_list(); /* Tests for deprecated routines */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - -} /* test_genprop() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_genprop - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_genprop(void) -{ - H5Fdelete(FILENAME, H5P_DEFAULT); -} diff --git a/test/API/th5o.c b/test/API/th5o.c deleted file mode 100644 index 8a052ee7702..00000000000 --- a/test/API/th5o.c +++ /dev/null @@ -1,1886 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: th5o - * - * Test public H5O functions for accessing - * - *************************************************************/ - -#include "testhdf5.h" - -#if 0 -#include "H5Fprivate.h" -#include "H5VLprivate.h" -#include "H5VLnative_private.h" -#endif - -#define TEST_FILENAME "th5o_file.h5" - -#define RANK 2 -#define DIM0 5 -#define DIM1 10 - -#define TEST6_DIM1 100 -#define TEST6_DIM2 100 - -/**************************************************************** -** -** test_h5o_open(): Test H5Oopen function. 
-** -****************************************************************/ -static void -test_h5o_open(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - char filename[1024]; - hsize_t dims[RANK]; - H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ - H5G_info_t ginfo; /* Group info struct */ - H5T_class_t type_class; /* Class of the datatype */ - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Oopen\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Now make sure that H5Oopen can open all three types of objects */ - grp = H5Oopen(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Oopen"); - dtype = H5Oopen(fid, "group/datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Oopen"); - /* Check that we can use the group as a valid location */ - dset = H5Oopen(grp, "/dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Oopen"); - - /* Make sure that each is the right kind of ID */ - id_type = H5Iget_type(grp); - VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); - id_type = H5Iget_type(dtype); - VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); - id_type = H5Iget_type(dset); - VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); - - /* Do something more complex with each of the IDs to make sure they "work" */ - ret = H5Gget_info(grp, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ - - type_class = H5Tget_class(dtype); - VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); - - dspace = H5Dget_space(dset); - CHECK(dspace, FAIL, "H5Dget_space"); - - /* Close the IDs */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Trying to open objects with bogus names should fail gracefully */ - H5E_BEGIN_TRY - { - grp = H5Oopen(fid, "bogus_group", H5P_DEFAULT); - VERIFY(grp, FAIL, "H5Oopen"); - dtype = H5Oopen(fid, "group/bogus_datatype", H5P_DEFAULT); - VERIFY(dtype, FAIL, "H5Oopen"); - dset = H5Oopen(fid, "/bogus_dataset", H5P_DEFAULT); - VERIFY(dset, FAIL, "H5Oopen"); - } - H5E_END_TRY - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, 
FAIL, "H5Fclose"); - - /* Trying to open an object with a bogus file ID should fail */ - H5E_BEGIN_TRY - { - dset = H5Oopen(fid, "dataset", H5P_DEFAULT); - VERIFY(dset, FAIL, "H5Oopen"); - } - H5E_END_TRY -} /* test_h5o_open() */ - -/**************************************************************** -** -** test_h5o_close(): Test H5Oclose function. -** -****************************************************************/ -static void -test_h5o_close(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - char filename[1024]; - hsize_t dims[RANK]; - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Oclose\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group and close it with H5Oclose */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - VERIFY_TYPE(H5Iget_type(grp), H5I_GROUP, H5I_type_t, "%d", "H5Iget_type"); - ret = H5Oclose(grp); - CHECK(ret, FAIL, "H5Oclose"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - ret = H5Oclose(dtype); - CHECK(ret, FAIL, "H5Oclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - ret = H5Oclose(dset); - CHECK(ret, FAIL, "H5Oclose"); - - /* Attempting to close the data space with H5Oclose should fail */ - H5E_BEGIN_TRY - { - ret = H5Oclose(dspace); - VERIFY(ret, FAIL, "H5Oclose"); - } - H5E_END_TRY - /* Close the dataspace for real */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Make sure that H5Oclose can close objects opened with H5Oopen */ - grp = H5Oopen(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Oopen"); - dtype = H5Oopen(fid, "group/datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Oopen"); - dset = H5Oopen(fid, "dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Oopen"); - - ret = H5Oclose(grp); - CHECK(ret, FAIL, "H5Oclose"); - ret = H5Oclose(dtype); - CHECK(ret, FAIL, "H5Oclose"); - ret = H5Oclose(dset); - CHECK(ret, FAIL, "H5Oclose"); - - /* Make sure H5Oclose can close objects opened with H5*open */ - grp = H5Gopen2(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gopen2"); - dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Topen2"); - dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dopen2"); - - ret = H5Oclose(grp); - CHECK(ret, FAIL, "H5Oclose"); - ret = H5Oclose(dtype); - CHECK(ret, FAIL, "H5Oclose"); - ret = H5Oclose(dset); - CHECK(ret, FAIL, "H5Oclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} - -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS -/**************************************************************** -** -** test_h5o_open_by_addr(): Test H5Oopen_by_addr function. 
-** -****************************************************************/ -static void -test_h5o_open_by_addr(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - char filename[1024]; - H5L_info2_t li; /* Buffer for H5Lget_info2 */ - haddr_t grp_addr; /* Addresses for objects */ - haddr_t dset_addr; - haddr_t dtype_addr; - hsize_t dims[RANK]; - H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ - H5G_info_t ginfo; /* Group info struct */ - H5T_class_t type_class; /* Class of the datatype */ - herr_t ret; /* Value returned from API calls */ - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Get address for each object */ - ret = H5Lget_info2(fid, "group", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info2"); - ret = H5VLnative_token_to_addr(fid, li.u.token, &grp_addr); - CHECK(ret, FAIL, "H5VLnative_token_to_addr"); - - ret = H5Lget_info2(fid, "group/datatype", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info2"); - ret = H5VLnative_token_to_addr(fid, li.u.token, &dtype_addr); - CHECK(ret, FAIL, "H5VLnative_token_to_addr"); - - ret = H5Lget_info2(fid, "dataset", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info2"); - ret = H5VLnative_token_to_addr(fid, li.u.token, &dset_addr); - CHECK(ret, FAIL, "H5VLnative_token_to_addr"); - - /* Now make sure that H5Oopen_by_addr can open all three types of objects */ - grp = H5Oopen_by_addr(fid, grp_addr); - CHECK(grp, FAIL, "H5Oopen_by_addr"); - dtype = H5Oopen_by_addr(fid, dtype_addr); - CHECK(dtype, FAIL, "H5Oopen_by_addr"); - /* Check that we can use the group ID as a valid location */ - dset = H5Oopen_by_addr(grp, dset_addr); - CHECK(dset, FAIL, "H5Oopen_by_addr"); - - /* Make sure that each is the right kind of ID */ - id_type = H5Iget_type(grp); - VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); - id_type = H5Iget_type(dtype); - VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); - id_type = H5Iget_type(dset); - VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); - - /* Do something more complex with each of the IDs to make sure they "work" */ - ret = H5Gget_info(grp, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ - - type_class = H5Tget_class(dtype); - VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); - - dspace = H5Dget_space(dset); - 
CHECK(dspace, FAIL, "H5Dget_space"); - - /* Close the IDs */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Try giving some bogus values to H5O_open_by_addr. */ - /* Try to open an object with a bad address */ - grp_addr += 20; - H5E_BEGIN_TRY - { - grp = H5Oopen_by_addr(fid, grp_addr); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Oopen_by_addr"); - - /* For instance, an objectno smaller than the end of the file's superblock should - * trigger an error */ - grp_addr = 10; - H5E_BEGIN_TRY - { - grp = H5Oopen_by_addr(fid, grp_addr); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Oopen_by_addr"); - - /* Likewise, an objectno larger than the size of the file should fail */ - grp_addr = 0; - grp_addr = 1000000000; - H5E_BEGIN_TRY - { - grp = H5Oopen_by_addr(fid, grp_addr); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Oopen_by_addr"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Also, trying to open an object without a valid location should fail */ - H5E_BEGIN_TRY - { - dtype = H5Oopen_by_addr(fid, dtype_addr); - } - H5E_END_TRY - VERIFY(dtype, FAIL, "H5Oopen_by_addr"); -} /* test_h5o_open_by_addr() */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - -/**************************************************************** -** -** test_h5o_open_by_token(): Test H5Oopen_by_token function. -** -****************************************************************/ -static void -test_h5o_open_by_token(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - char filename[1024]; - H5L_info2_t li; /* Buffer for H5Lget_info */ - hsize_t dims[RANK]; - H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ - H5G_info_t ginfo; /* Group info struct */ - H5T_class_t type_class; /* Class of the datatype */ - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Oopen_by_token\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. 
*/ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Make sure that H5Oopen_by_token can open all three types of objects */ - ret = H5Lget_info2(fid, "group", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info"); - grp = H5Oopen_by_token(fid, li.u.token); - CHECK(grp, FAIL, "H5Oopen_by_token"); - - ret = H5Lget_info2(fid, "group/datatype", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info"); - dtype = H5Oopen_by_token(fid, li.u.token); - CHECK(dtype, FAIL, "H5Oopen_by_token"); - - ret = H5Lget_info2(fid, "dataset", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info"); - /* Check that we can use the group ID as a valid location */ - dset = H5Oopen_by_token(grp, li.u.token); - CHECK(dset, FAIL, "H5Oopen_by_token"); - - /* Make sure that each is the right kind of ID */ - id_type = H5Iget_type(grp); - VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); - id_type = H5Iget_type(dtype); - VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); - id_type = H5Iget_type(dset); - VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); - - /* Do something more complex with each of the IDs to make sure they "work" */ - ret = H5Gget_info(grp, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ - - type_class = H5Tget_class(dtype); - VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); - - dspace = H5Dget_space(dset); - CHECK(dspace, FAIL, "H5Dget_space"); - - /* Close the IDs */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Try giving some bogus values to H5O_open_by_token */ - /* Try opening an object using H5O_TOKEN_UNDEF (should fail) */ - H5E_BEGIN_TRY - { - dtype = H5Oopen_by_token(fid, H5O_TOKEN_UNDEF); - } - H5E_END_TRY - VERIFY(dtype, FAIL, "H5Oopen_by_token"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Also, trying to open an object without a valid location (should fail) */ - H5E_BEGIN_TRY - { - dtype = H5Oopen_by_token(fid, li.u.token); - } - H5E_END_TRY - VERIFY(dtype, FAIL, "H5Oopen_by_token"); - -} /* test_h5o_open_by_token() */ - -/**************************************************************** -** -** test_h5o_refcount(): Test H5O refcounting functions. 
-** -****************************************************************/ -static void -test_h5o_refcount(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - char filename[1024]; - H5O_info2_t oinfo; /* Object info struct */ - hsize_t dims[RANK]; - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing retrieval of object reference count with H5Oget_info\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Get ref counts for each object. They should all be 1, since each object has a hard link. */ - ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - - /* Increment each object's reference count. */ - ret = H5Oincr_refcount(grp); - CHECK(ret, FAIL, "H5Oincr_refcount"); - ret = H5Oincr_refcount(dtype); - CHECK(ret, FAIL, "H5Oincr_refcount"); - ret = H5Oincr_refcount(dset); - CHECK(ret, FAIL, "H5Oincr_refcount"); - - /* Get ref counts for each object. They should all be 2 now. */ - ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); - - /* Decrement the reference counts and check that they decrease back to 1. 
*/ - ret = H5Odecr_refcount(grp); - CHECK(ret, FAIL, "H5Odecr_refcount"); - ret = H5Odecr_refcount(dtype); - CHECK(ret, FAIL, "H5Odecr_refcount"); - ret = H5Odecr_refcount(dset); - CHECK(ret, FAIL, "H5Odecr_refcount"); - - ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - - /* Increment the reference counts and then close the file to make sure the increment is permanent */ - ret = H5Oincr_refcount(grp); - CHECK(ret, FAIL, "H5Oincr_refcount"); - ret = H5Oincr_refcount(dtype); - CHECK(ret, FAIL, "H5Oincr_refcount"); - ret = H5Oincr_refcount(dset); - CHECK(ret, FAIL, "H5Oincr_refcount"); - - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file and check that the reference counts were really incremented */ - fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - grp = H5Gopen2(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gopen2"); - dtype = H5Topen2(fid, "datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Topen2"); - dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dopen2"); - - ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3"); - - /* Decrement the reference counts and close the file */ - ret = H5Odecr_refcount(grp); - CHECK(ret, FAIL, "H5Odecr_refcount"); - ret = H5Odecr_refcount(dtype); - CHECK(ret, FAIL, "H5Odecr_refcount"); - ret = H5Odecr_refcount(dset); - CHECK(ret, FAIL, "H5Odecr_refcount"); - - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file and check that the reference counts were really decremented */ - fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - grp = H5Gopen2(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gopen2"); - dtype = H5Topen2(fid, "datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Topen2"); - dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dopen2"); - - ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, 
"H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3"); - - /* Close the IDs */ - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Make sure that bogus IDs return errors properly */ - H5E_BEGIN_TRY - { - ret = H5Oincr_refcount(grp); - VERIFY(ret, FAIL, "H5Oincr_refcount"); - ret = H5Oincr_refcount(dtype); - VERIFY(ret, FAIL, "H5Oincr_refcount"); - ret = H5Oincr_refcount(dset); - VERIFY(ret, FAIL, "H5Oincr_refcount"); - ret = H5Odecr_refcount(grp); - VERIFY(ret, FAIL, "H5Odecr_refcount"); - ret = H5Odecr_refcount(dtype); - VERIFY(ret, FAIL, "H5Odecr_refcount"); - ret = H5Odecr_refcount(dset); - VERIFY(ret, FAIL, "H5Odecr_refcount"); - } - H5E_END_TRY - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_h5o_refcount() */ - -/**************************************************************** -** -** test_h5o_plist(): Test object creation properties -** -****************************************************************/ -static void -test_h5o_plist(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - hid_t fapl; /* File access property list */ - hid_t gcpl, dcpl, tcpl; /* Object creation properties */ - char filename[1024]; - unsigned def_max_compact, def_min_dense; /* Default phase change parameters */ - unsigned max_compact, min_dense; /* Actual phase change parameters */ - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Object creation properties\n")); - - /* Make a FAPL that uses the "use the latest version of the format" flag */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set the "use the latest version of the format" bounds for creating objects in the file */ - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - h5_fixname(TEST_FILENAME, fapl, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create group, dataset & named datatype creation property lists */ - gcpl = H5Pcreate(H5P_GROUP_CREATE); - CHECK(gcpl, FAIL, "H5Pcreate"); - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - tcpl = H5Pcreate(H5P_DATATYPE_CREATE); - CHECK(tcpl, FAIL, "H5Pcreate"); - - /* Retrieve default attribute phase change values */ - ret = H5Pget_attr_phase_change(gcpl, &def_max_compact, &def_min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - - /* Set non-default attribute phase change values on each creation property list */ - ret = H5Pset_attr_phase_change(gcpl, def_max_compact + 1, def_min_dense - 1); - CHECK(ret, FAIL, "H5Pset_attr_phase_change"); - ret = H5Pset_attr_phase_change(dcpl, def_max_compact + 1, def_min_dense - 1); - CHECK(ret, FAIL, "H5Pset_attr_phase_change"); - ret = H5Pset_attr_phase_change(tcpl, def_max_compact + 1, def_min_dense - 1); - CHECK(ret, FAIL, "H5Pset_attr_phase_change"); - - /* Retrieve attribute phase change values on each creation property list and verify */ - ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); - 
CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - - /* Create a group, dataset, and committed datatype within the file, - * using the respective type of creation property lists. - */ - - /* Create the group anonymously and link it in */ - grp = H5Gcreate_anon(fid, gcpl, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate_anon"); - ret = H5Olink(grp, fid, "group", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Olink"); - - /* Commit the type inside the group anonymously and link it in */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit_anon(fid, dtype, tcpl, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit_anon"); - ret = H5Olink(dtype, fid, "datatype", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Olink"); - - /* Create the dataspace for the dataset. */ - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); - - /* Create the dataset anonymously and link it in */ - dset = H5Dcreate_anon(fid, H5T_NATIVE_INT, dspace, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate_anon"); - ret = H5Olink(dset, fid, "dataset", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Olink"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close current creation property lists */ - ret = H5Pclose(gcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(tcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Retrieve each object's creation property list */ - gcpl = H5Gget_create_plist(grp); - CHECK(gcpl, FAIL, "H5Gget_create_plist"); - tcpl = H5Tget_create_plist(dtype); - CHECK(tcpl, FAIL, "H5Tget_create_plist"); - dcpl = H5Dget_create_plist(dset); - CHECK(dcpl, FAIL, "H5Dget_create_plist"); - - /* Retrieve attribute phase change values on each creation property list and verify */ - ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - - /* Close current objects */ - ret = H5Pclose(gcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(tcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = 
H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file and check that the object creation properties persist */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open objects */ - grp = H5Gopen2(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gopen2"); - dtype = H5Topen2(fid, "datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Topen2"); - dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dopen2"); - - /* Retrieve each object's creation property list */ - gcpl = H5Gget_create_plist(grp); - CHECK(gcpl, FAIL, "H5Gget_create_plist"); - tcpl = H5Tget_create_plist(dtype); - CHECK(tcpl, FAIL, "H5Tget_create_plist"); - dcpl = H5Dget_create_plist(dset); - CHECK(dcpl, FAIL, "H5Dget_create_plist"); - - /* Retrieve attribute phase change values on each creation property list and verify */ - ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense); - CHECK(ret, FAIL, "H5Pget_attr_phase_change"); - VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change"); - VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change"); - - /* Close current objects */ - ret = H5Pclose(gcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(tcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close the FAPL */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); -} /* test_h5o_plist() */ - -/**************************************************************** -** -** test_h5o_link(): Test creating link to object -** -****************************************************************/ -static void -test_h5o_link(void) -{ - hid_t file_id = -1; - hid_t group_id = -1; - hid_t space_id = -1; - hid_t dset_id = -1; - hid_t type_id = -1; - hid_t fapl_id = -1; - hid_t lcpl_id = -1; - char filename[1024]; - hsize_t dims[2] = {TEST6_DIM1, TEST6_DIM2}; - htri_t committed; /* Whether the named datatype is committed */ - H5F_libver_t low, high; /* File format bounds */ - int *wdata; - int *rdata; - int i, n; - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Olink\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Allocate memory buffers */ - /* (These are treated as 2-D buffers) */ - wdata = (int *)malloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int)); - CHECK_PTR(wdata, "malloc"); - rdata = (int *)malloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int)); - CHECK_PTR(rdata, "malloc"); - - /* Initialize the raw data */ - for (i = n = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++) - wdata[i] = n++; - - /* Create the dataspace */ - space_id = H5Screate_simple(2, dims, NULL); - CHECK(space_id, FAIL, 
"H5Screate_simple"); - - /* Create LCPL with intermediate group creation flag set */ - lcpl_id = H5Pcreate(H5P_LINK_CREATE); - CHECK(lcpl_id, FAIL, "H5Pcreate"); - ret = H5Pset_create_intermediate_group(lcpl_id, true); - CHECK(ret, FAIL, "H5Pset_create_intermediate_group"); - - /* Create a file access property list */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl_id, FAIL, "H5Pcreate"); - - /* Loop through all the combinations of low/high library format bounds */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - - /* Set version bounds */ - H5E_BEGIN_TRY - { - ret = H5Pset_libver_bounds(fapl_id, low, high); - } - H5E_END_TRY - - if (ret < 0) /* Invalid low/high combinations */ - continue; - - /* Create a new HDF5 file */ - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Close the FAPL */ - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create and commit a datatype with no name */ - type_id = H5Tcopy(H5T_NATIVE_INT); - CHECK(type_id, FAIL, "H5Fcreate"); - ret = H5Tcommit_anon(file_id, type_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit_anon"); - committed = H5Tcommitted(type_id); - VERIFY(committed, true, "H5Tcommitted"); - - /* Create a dataset with no name using the committed datatype*/ - dset_id = H5Dcreate_anon(file_id, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate_anon"); - - /* Verify that we can write to and read from the dataset */ - - /* Write the data to the dataset */ - ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read the data back */ - ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data */ - for (i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++) - VERIFY(wdata[i], rdata[i], "H5Dread"); - - /* Create a group with no name*/ - group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate_anon"); - - /* Link nameless datatype into nameless group */ - ret = H5Olink(type_id, group_id, "datatype", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Olink"); - - /* Link nameless dataset into nameless group with intermediate group */ - ret = H5Olink(dset_id, group_id, "inter_group/dataset", lcpl_id, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Olink"); - - /* Close IDs for dataset and datatype */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Re-open datatype using new link */ - type_id = H5Topen2(group_id, "datatype", H5P_DEFAULT); - CHECK(type_id, FAIL, "H5Topen2"); - - /* Link nameless group to root group and close the group ID*/ - ret = H5Olink(group_id, file_id, "/group", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Olink"); - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open dataset through root group and verify its data */ - dset_id = H5Dopen2(file_id, "/group/inter_group/dataset", H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Read data from dataset */ - ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - for (i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++) - VERIFY(wdata[i], rdata[i], "H5Dread"); - - /* Close open IDs */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(type_id); - CHECK(ret, FAIL, 
"H5Tclose"); - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - } /* for high */ - } /* for low */ - - /* Close remaining IDs */ - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(lcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Release buffers */ - free(wdata); - free(rdata); -} /* end test_h5o_link() */ - -#if 0 -/**************************************************************** -** -** test_h5o_comment(): Test H5Oset(get)_comment functions. -** -****************************************************************/ -static void -test_h5o_comment(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - hid_t attr_space, attr_id; - char filename[1024]; - hsize_t dims[RANK]; - hsize_t attr_dims = 1; - int attr_value = 5; - const char *file_comment = "file comment"; - const char *grp_comment = "group comment"; - const char *dset_comment = "dataset comment"; - const char *dtype_comment = "datatype comment"; - char check_comment[64]; - ssize_t comment_len = 0; - ssize_t len; - herr_t ret; /* Value returned from API calls */ - int ret_value; - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create an attribute for the file */ - attr_space = H5Screate_simple(1, &attr_dims, NULL); - CHECK(attr_space, FAIL, "H5Screate_simple"); - attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - - /* Putting a comment on the file through its attribute */ - ret = H5Oset_comment(attr_id, file_comment); - CHECK(ret, FAIL, "H5Oset_comment"); - - ret = H5Sclose(attr_space); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - - /* Putting a comment on the group */ - ret = H5Oset_comment(grp, grp_comment); - CHECK(ret, FAIL, "H5Oset_comment"); - - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Putting a comment on the committed data type */ - ret = H5Oset_comment(dtype, dtype_comment); - CHECK(ret, FAIL, "H5Oset_comment"); - - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - /* Putting a comment on the dataset */ - ret = H5Oset_comment(dset, dset_comment); - CHECK(ret, FAIL, "H5Oset_comment"); - - /* Putting a comment on the dataspace. It's supposed to fail. 
*/ - H5E_BEGIN_TRY - { - ret = H5Oset_comment(dspace, "dataspace comment"); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Oset_comment"); - - /* Close the file */ - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Now make sure that the comments are correct all 4 types of objects */ - /* Open file */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Getting the comment on the file and verify it */ - comment_len = H5Oget_comment(fid, NULL, (size_t)0); - CHECK(comment_len, FAIL, "H5Oget_comment"); - - len = H5Oget_comment(fid, check_comment, (size_t)comment_len + 1); - CHECK(len, FAIL, "H5Oget_comment"); - - ret_value = strcmp(file_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment"); - - /* Open the group */ - grp = H5Gopen2(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gopen2"); - - /* Getting the comment on the group and verify it */ - comment_len = H5Oget_comment(grp, NULL, (size_t)0); - CHECK(comment_len, FAIL, "H5Oget_comment"); - - len = H5Oget_comment(grp, check_comment, (size_t)comment_len + 1); - CHECK(len, FAIL, "H5Oget_comment"); - - ret_value = strcmp(grp_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment"); - - /* Open the datatype */ - dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT); - CHECK(dtype, FAIL, "H5Topen2"); - - /* Getting the comment on the datatype and verify it */ - comment_len = H5Oget_comment(dtype, NULL, (size_t)0); - CHECK(comment_len, FAIL, "H5Oget_comment"); - - len = H5Oget_comment(dtype, check_comment, (size_t)comment_len + 1); - CHECK(len, FAIL, "H5Oget_comment"); - - ret_value = strcmp(dtype_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment"); - - /* Open the dataset */ - dset = H5Dopen2(fid, "dataset", H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dopen2"); - - /* Getting the comment on the dataset and verify it */ - comment_len = H5Oget_comment(dset, NULL, (size_t)0); - CHECK(comment_len, FAIL, "H5Oget_comment"); - - len = H5Oget_comment(dset, check_comment, (size_t)comment_len + 1); - CHECK(ret, len, "H5Oget_comment"); - - ret_value = strcmp(dset_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment"); - - /* Close the IDs */ - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_h5o_comment() */ - -/**************************************************************** -** -** test_h5o_comment_by_name(): Test H5Oset(get)_comment_by_name functions. 
-** -****************************************************************/ -static void -test_h5o_comment_by_name(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - hid_t attr_space, attr_id; - char filename[1024]; - hsize_t dims[RANK]; - hsize_t attr_dims = 1; - int attr_value = 5; - const char *file_comment = "file comment by name"; - const char *grp_comment = "group comment by name"; - const char *dset_comment = "dataset comment by name"; - const char *dtype_comment = "datatype comment by name"; - char check_comment[64]; - ssize_t comment_len = 0; - ssize_t len; - herr_t ret; /* Value returned from API calls */ - int ret_value; - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create an attribute for the file */ - attr_space = H5Screate_simple(1, &attr_dims, NULL); - CHECK(attr_space, FAIL, "H5Screate_simple"); - attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value); - CHECK(ret, FAIL, "H5Awrite"); - - /* Putting a comment on the file through its attribute */ - ret = H5Oset_comment_by_name(attr_id, ".", file_comment, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oset_comment_by_name"); - - ret = H5Sclose(attr_space); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - - /* Putting a comment on the group */ - ret = H5Oset_comment_by_name(fid, "group", grp_comment, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oset_comment_by_name"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Putting a comment on the committed data type */ - ret = H5Oset_comment_by_name(grp, "datatype", dtype_comment, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oset_comment_by_name"); - - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - /* Putting a comment on the dataset */ - ret = H5Oset_comment_by_name(fid, "dataset", dset_comment, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oset_comment_by_name"); - - /* Putting a comment on the dataspace. It's supposed to fail. 
*/ - H5E_BEGIN_TRY - { - ret = H5Oset_comment_by_name(dspace, ".", "dataspace comment", H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Oset_comment"); - - /* Close the file */ - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Now make sure that the comments are correct all 4 types of objects */ - /* Open file */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Getting the comment on the file and verify it */ - comment_len = H5Oget_comment_by_name(fid, ".", NULL, (size_t)0, H5P_DEFAULT); - CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); - - len = H5Oget_comment_by_name(fid, ".", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); - CHECK(len, FAIL, "H5Oget_comment_by_name"); - - ret_value = strcmp(file_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment_by_name"); - - /* Open the group */ - grp = H5Gopen2(fid, "group", H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gopen2"); - - /* Getting the comment on the group and verify it */ - comment_len = H5Oget_comment_by_name(fid, "group", NULL, (size_t)0, H5P_DEFAULT); - CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); - - len = H5Oget_comment_by_name(fid, "group", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); - CHECK(len, FAIL, "H5Oget_comment_by_name"); - - ret_value = strcmp(grp_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment_by_name"); - - /* Getting the comment on the datatype and verify it */ - comment_len = H5Oget_comment_by_name(grp, "datatype", NULL, (size_t)0, H5P_DEFAULT); - CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); - - len = H5Oget_comment_by_name(grp, "datatype", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); - CHECK(len, FAIL, "H5Oget_comment"); - - ret_value = strcmp(dtype_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment_by_name"); - - /* Getting the comment on the dataset and verify it */ - comment_len = H5Oget_comment_by_name(fid, "dataset", NULL, (size_t)0, H5P_DEFAULT); - CHECK(comment_len, FAIL, "H5Oget_comment_by_name"); - - len = H5Oget_comment_by_name(fid, "dataset", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); - CHECK(len, FAIL, "H5Oget_comment_by_name"); - - ret_value = strcmp(dset_comment, check_comment); - VERIFY(ret_value, 0, "H5Oget_comment_by_name"); - - /* Close the IDs */ - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_h5o_comment_by_name() */ -#endif - -/**************************************************************** -** -** test_h5o_getinfo_same_file(): Test that querying the object info for -** objects in the same file will return the same file "number" -** -****************************************************************/ -static void -test_h5o_getinfo_same_file(void) -{ - hid_t fid1, fid2; /* HDF5 File ID */ - hid_t gid1, gid2; /* Group IDs */ - char filename[1024]; - H5O_info2_t oinfo1, oinfo2; /* Object info structs */ - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing H5Oget_info on objects in same file\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create two groups in the file */ - gid1 = H5Gcreate2(fid1, 
"group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - gid2 = H5Gcreate2(fid1, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gcreate2"); - - /* Reset object info */ - memset(&oinfo1, 0, sizeof(oinfo1)); - memset(&oinfo2, 0, sizeof(oinfo2)); - - /* Query the object info for each object, through group IDs */ - ret = H5Oget_info3(gid1, &oinfo1, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info3"); - ret = H5Oget_info3(gid2, &oinfo2, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info3"); - - VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); - - /* Reset object info */ - memset(&oinfo1, 0, sizeof(oinfo1)); - memset(&oinfo2, 0, sizeof(oinfo2)); - - /* Query the object info for each object, by name */ - ret = H5Oget_info_by_name3(fid1, "group1", &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid1, "group2", &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - - VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); - - /* Close everything */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open file twice */ - fid1 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Open the two groups in the file */ - gid1 = H5Gopen2(fid1, "group1", H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gopen2"); - gid2 = H5Gopen2(fid2, "group2", H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - /* Reset object info */ - memset(&oinfo1, 0, sizeof(oinfo1)); - memset(&oinfo2, 0, sizeof(oinfo2)); - - /* Query the object info for each object, through group IDs */ - ret = H5Oget_info3(gid1, &oinfo1, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info3"); - ret = H5Oget_info3(gid2, &oinfo2, H5O_INFO_BASIC); - CHECK(ret, FAIL, "H5Oget_info3"); - - VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); - - /* Reset object info */ - memset(&oinfo1, 0, sizeof(oinfo1)); - memset(&oinfo2, 0, sizeof(oinfo2)); - - /* Query the object info for each object, by name */ - ret = H5Oget_info_by_name3(fid1, "group1", &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(fid1, "group2", &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - - VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3"); - - /* Close everything */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_h5o_getinfo_same_file() */ - -#ifndef H5_NO_DEPRECATED_SYMBOLS -#if 0 -/**************************************************************** -** -** test_h5o_open_by_addr_deprec(): Test H5Oopen_by_addr function. 
-** -****************************************************************/ -static void -test_h5o_open_by_addr_deprec(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t grp, dset, dtype, dspace; /* Object identifiers */ - char filename[1024]; - H5L_info1_t li; /* Buffer for H5Lget_info1 */ - haddr_t grp_addr; /* Addresses for objects */ - haddr_t dset_addr; - haddr_t dtype_addr; - hsize_t dims[RANK]; - H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ - H5G_info_t ginfo; /* Group info struct */ - H5T_class_t type_class; /* Class of the datatype */ - herr_t ret; /* Value returned from API calls */ - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create a new HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group, dataset, and committed datatype within the file */ - /* Create the group */ - grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp, FAIL, "H5Gcreate2"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - /* Commit the type inside the group */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create the data space for the dataset. */ - dims[0] = DIM0; - dims[1] = DIM1; - dspace = H5Screate_simple(RANK, dims, NULL); - CHECK(dspace, FAIL, "H5Screate_simple"); - - /* Create the dataset. */ - dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Get address for each object */ - ret = H5Lget_info1(fid, "group", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info"); - grp_addr = li.u.address; - ret = H5Lget_info1(fid, "group/datatype", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info"); - dtype_addr = li.u.address; - ret = H5Lget_info1(fid, "dataset", &li, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info"); - dset_addr = li.u.address; - - /* Now make sure that H5Oopen_by_addr can open all three types of objects */ - grp = H5Oopen_by_addr(fid, grp_addr); - CHECK(grp, FAIL, "H5Oopen_by_addr"); - dtype = H5Oopen_by_addr(fid, dtype_addr); - CHECK(dtype, FAIL, "H5Oopen_by_addr"); - /* Check that we can use the group ID as a valid location */ - dset = H5Oopen_by_addr(grp, dset_addr); - CHECK(dset, FAIL, "H5Oopen_by_addr"); - - /* Make sure that each is the right kind of ID */ - id_type = H5Iget_type(grp); - VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID"); - id_type = H5Iget_type(dtype); - VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID"); - id_type = H5Iget_type(dset); - VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID"); - - /* Do something more complex with each of the IDs to make sure they "work" */ - ret = H5Gget_info(grp, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */ - - type_class = H5Tget_class(dtype); - VERIFY(type_class, H5T_INTEGER, "H5Tget_class"); - - dspace = H5Dget_space(dset); - CHECK(dspace, FAIL, "H5Dget_space"); - - /* Close the IDs */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = 
H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Try giving some bogus values to H5O_open_by_addr. */ - /* Try to open an object with a bad address */ - grp_addr += 20; - H5E_BEGIN_TRY - { - grp = H5Oopen_by_addr(fid, grp_addr); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Oopen_by_addr"); - - /* For instance, an objectno smaller than the end of the file's superblock should - * trigger an error */ - grp_addr = 10; - H5E_BEGIN_TRY - { - grp = H5Oopen_by_addr(fid, grp_addr); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Oopen_by_addr"); - - /* Likewise, an objectno larger than the size of the file should fail */ - grp_addr = 0; - grp_addr = 1000000000; - H5E_BEGIN_TRY - { - grp = H5Oopen_by_addr(fid, grp_addr); - } - H5E_END_TRY - VERIFY(grp, FAIL, "H5Oopen_by_addr"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Also, trying to open an object without a valid location should fail */ - H5E_BEGIN_TRY - { - dtype = H5Oopen_by_addr(fid, dtype_addr); - } - H5E_END_TRY - VERIFY(dtype, FAIL, "H5Oopen_by_addr"); -} /* test_h5o_open_by_addr_deprec() */ - -/**************************************************************** -** -** visit_obj_cb(): -** This is the callback function invoked by H5Ovisit1() in -** test_h5o_getinfo_visit(): -** --Verify that the object info returned to the callback -** function is the same as H5Oget_info2(). -** -****************************************************************/ -static int -visit_obj_cb(hid_t group_id, const char *name, const H5O_info1_t *oinfo1, void H5_ATTR_UNUSED *_op_data) -{ - H5O_info1_t oinfo2; /* Object info structs */ - - /* Verify the object info for "group1", "group2" and the root group */ - if (!(strcmp(name, "group1"))) { - H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); - VERIFY(oinfo1->num_attrs, oinfo2.num_attrs, "obj info from H5Ovisit1"); - } - else if (!(strcmp(name, "group2"))) { - H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_HDR, H5P_DEFAULT); - VERIFY(oinfo1->hdr.nmesgs, oinfo2.hdr.nmesgs, "obj info from H5Ovisit1/H5Oget_info2"); - VERIFY(oinfo1->hdr.nchunks, oinfo2.hdr.nchunks, "obj info from H5Ovisit1/H5Oget_info2"); - } - else if (!(strcmp(name, "."))) { - H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_META_SIZE, H5P_DEFAULT); - VERIFY(oinfo1->meta_size.obj.index_size, oinfo2.meta_size.obj.index_size, - "obj info from H5Ovisit1/H5Oget_info2"); - VERIFY(oinfo1->meta_size.obj.heap_size, oinfo2.meta_size.obj.heap_size, - "obj info from H5Ovisit1/H5Oget_info2"); - } - - return (H5_ITER_CONT); -} /* end visit_obj_cb() */ - -/**************************************************************** -** -** test_h5o_getinfo_visit(): -** Verify that the object info returned via H5Oget_info1() -** and H5Oget_info2() are the same. -** Verify that the object info retrieved via H5Ovisit1() is -** the same as H5Oget_info2(). 
-** -****************************************************************/ -static void -test_h5o_getinfo_visit(void) -{ - hid_t fid = -1; /* HDF5 File ID */ - hid_t gid1 = -1, gid2 = -1; /* Group IDs */ - hid_t sid = -1; /* Dataspace ID */ - hid_t aid = -1; /* Attribute ID */ - char filename[1024]; - H5O_info1_t oinfo1, oinfo2; /* Object info structs */ - char attrname[25]; /* Attribute name */ - int j; /* Local index variable */ - herr_t ret; /* Value returned from API calls */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing info returned by H5Oget_info vs H5Ovisit\n")); - - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - - /* Create an HDF5 file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create "group1" in the file */ - gid1 = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - - /* Create dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Attach 10 attributes to "group1" */ - for (j = 0; j < 10; j++) { - /* Create the attribute name */ - snprintf(attrname, sizeof(attrname), "attr%u", j); - /* Create the attribute */ - aid = H5Acreate2(gid1, attrname, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - } - - /* Create "group2" in the file */ - gid2 = H5Gcreate2(fid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gcreate2"); - - /* Reset object info */ - memset(&oinfo1, 0, sizeof(oinfo1)); - memset(&oinfo2, 0, sizeof(oinfo2)); - - /* Query the object info for "group1" via H5Oget_info1 and H5Oget_info2 */ - ret = H5Oget_info1(gid1, &oinfo1); - CHECK(ret, FAIL, "H5Oget_info1"); - ret = H5Oget_info2(gid1, &oinfo2, H5O_INFO_BASIC | H5O_INFO_NUM_ATTRS); - CHECK(ret, FAIL, "H5Oget_info2"); - - /* Verify the object info for "group1" is correct */ - VERIFY(oinfo1.fileno, oinfo2.fileno, "obj info from H5Oget_info1/2"); - VERIFY(oinfo1.num_attrs, oinfo2.num_attrs, "obj info from H5Oget_info1/2"); - - /* Reset object info */ - memset(&oinfo1, 0, sizeof(oinfo1)); - memset(&oinfo2, 0, sizeof(oinfo2)); - - /* Query the object info for "group2" via H5Oget_info1 and H5Oget_info2 */ - ret = H5Oget_info_by_name1(fid, "group2", &oinfo1, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - ret = H5Oget_info_by_name2(fid, "group2", &oinfo2, H5O_INFO_HDR | H5O_INFO_META_SIZE, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - - /* Verify the object info for "group2" is correct */ - VERIFY(oinfo1.hdr.nmesgs, oinfo2.hdr.nmesgs, "obj info from H5Oget_info1/2"); - VERIFY(oinfo1.hdr.nchunks, oinfo2.hdr.nchunks, "obj info from H5Oget_info1/2"); - VERIFY(oinfo1.meta_size.obj.index_size, oinfo2.meta_size.obj.index_size, "obj info from H5Oget_info1/2"); - VERIFY(oinfo1.meta_size.obj.heap_size, oinfo2.meta_size.obj.heap_size, "obj info from H5Oget_info1/2"); - - /* Close everything */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Verify the object info returned to the callback function is correct */ - ret = H5Ovisit1(fid, H5_INDEX_NAME, H5_ITER_INC, visit_obj_cb, NULL); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_h5o_getinfo_visit() */ -#endif -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - 
-/**************************************************************** -** -** test_h5o(): Main H5O (generic object) testing routine. -** -****************************************************************/ -void -test_h5o(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing Objects\n")); - - test_h5o_open(); /* Test generic open function */ -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - test_h5o_open_by_addr(); /* Test opening objects by address */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - test_h5o_open_by_token(); /* Test opening objects by token */ - test_h5o_close(); /* Test generic close function */ - test_h5o_refcount(); /* Test incrementing and decrementing reference count */ - test_h5o_plist(); /* Test object creation properties */ - test_h5o_link(); /* Test object link routine */ -#if 0 - test_h5o_comment(); /* Test routines for comment */ - test_h5o_comment_by_name(); /* Test routines for comment by name */ -#endif - test_h5o_getinfo_same_file(); /* Test info for objects in the same file */ -#ifndef H5_NO_DEPRECATED_SYMBOLS -#if 0 - test_h5o_open_by_addr_deprec(); /* Test opening objects by address with H5Lget_info1 */ - test_h5o_getinfo_visit(); /* Test object info for H5Oget_info1/2 and H5Ovisit1 */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif -} /* test_h5o() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_h5o - * - * Purpose: Cleanup temporary test files - * - * Return: none - * - *------------------------------------------------------------------------- - */ -void -cleanup_h5o(void) -{ - char filename[1024]; - - H5E_BEGIN_TRY - { - h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); - H5Fdelete(filename, H5P_DEFAULT); - } - H5E_END_TRY -} diff --git a/test/API/th5s.c b/test/API/th5s.c deleted file mode 100644 index 3ab21f4b2b5..00000000000 --- a/test/API/th5s.c +++ /dev/null @@ -1,3542 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: th5s - * - * Test the dataspace functionality - * - *************************************************************/ - -#include "testhdf5.h" -/* #include "H5srcdir.h" */ - -/* #include "H5Iprivate.h" */ -/* #include "H5Pprivate.h" */ - -#if 0 -/* - * This file needs to access private information from the H5S package. - * This file also needs to access the dataspace testing code. - */ -#define H5S_FRIEND /*suppress error about including H5Spkg */ -#define H5S_TESTING /*suppress warning about H5S testing funcs*/ -#include "H5Spkg.h" /* Dataspaces */ - -/* - * This file needs to access private information from the H5O package. - * This file also needs to access the dataspace testing code. 
- */ -#define H5O_FRIEND /*suppress error about including H5Opkg */ -#define H5O_TESTING -#include "H5Opkg.h" /* Object header */ -#endif - -#define TESTFILE "th5s.h5" -#define DATAFILE "th5s1.h5" -#define NULLFILE "th5s2.h5" -#define BASICFILE "th5s3.h5" -#define ZEROFILE "th5s4.h5" -#define BASICDATASET "basic_dataset" -#define BASICDATASET1 "basic_dataset1" -#define BASICDATASET2 "basic_dataset2" -#define BASICDATASET3 "basic_dataset3" -#define BASICDATASET4 "basic_dataset4" -#define BASICATTR "basic_attribute" -#define NULLDATASET "null_dataset" -#define NULLATTR "null_attribute" -#define EXTFILE_NAME "ext_file" - -/* 3-D dataset with fixed dimensions */ -#define SPACE1_RANK 3 -#define SPACE1_DIM1 3 -#define SPACE1_DIM2 15 -#define SPACE1_DIM3 13 - -/* 4-D dataset with one unlimited dimension */ -#define SPACE2_RANK 4 -#define SPACE2_DIM1 0 -#define SPACE2_DIM2 15 -#define SPACE2_DIM3 13 -#define SPACE2_DIM4 23 -#define SPACE2_MAX1 H5S_UNLIMITED -#define SPACE2_MAX2 15 -#define SPACE2_MAX3 13 -#define SPACE2_MAX4 23 - -/* Scalar dataset with simple datatype */ -#define SPACE3_RANK 0 -unsigned space3_data = 65; - -/* Scalar dataset with compound datatype */ -#define SPACE4_FIELDNAME1 "c1" -#define SPACE4_FIELDNAME2 "u" -#define SPACE4_FIELDNAME3 "f" -#define SPACE4_FIELDNAME4 "c2" -size_t space4_field1_off = 0; -size_t space4_field2_off = 0; -size_t space4_field3_off = 0; -size_t space4_field4_off = 0; -struct space4_struct { - char c1; - unsigned u; - float f; - char c2; -} space4_data = {'v', 987123, -3.14F, 'g'}; /* Test data for 4th dataspace */ - -/* - * Testing configuration defines used by: - * test_h5s_encode_regular_hyper() - * test_h5s_encode_irregular_hyper() - * test_h5s_encode_points() - */ -#define CONFIG_8 1 -#define CONFIG_16 2 -#define CONFIG_32 3 -#define POWER8 256 /* 2^8 */ -#define POWER16 65536 /* 2^16 */ -#define POWER32 4294967296 /* 2^32 */ - -/**************************************************************** -** -** test_h5s_basic(): Test basic H5S (dataspace) code. 
-** -****************************************************************/ -static void -test_h5s_basic(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t dset1; /* Dataset ID */ - hid_t aid1; /* Attribute ID */ - int rank; /* Logical rank of dataspace */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2, SPACE2_DIM3, SPACE2_DIM4}; - hsize_t dims3[H5S_MAX_RANK + 1]; - hsize_t max2[] = {SPACE2_MAX1, SPACE2_MAX2, SPACE2_MAX3, SPACE2_MAX4}; - hsize_t tdims[4]; /* Dimension array to test with */ - hsize_t tmax[4]; - hssize_t n; /* Number of dataspace elements */ -#if 0 - bool driver_is_default_compatible; -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace Manipulation\n")); - - sid1 = H5Screate_simple(SPACE1_RANK, dims1, max2); - CHECK(sid1, FAIL, "H5Screate_simple"); - - n = H5Sget_simple_extent_npoints(sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); - - rank = H5Sget_simple_extent_ndims(sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); - - rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - - sid2 = H5Screate_simple(SPACE2_RANK, dims2, max2); - CHECK(sid2, FAIL, "H5Screate_simple"); - - n = H5Sget_simple_extent_npoints(sid2); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, SPACE2_DIM1 * SPACE2_DIM2 * SPACE2_DIM3 * SPACE2_DIM4, "H5Sget_simple_extent_npoints"); - - rank = H5Sget_simple_extent_ndims(sid2); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE2_RANK, "H5Sget_simple_extent_ndims"); - - rank = H5Sget_simple_extent_dims(sid2, tdims, tmax); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tdims, dims2, SPACE2_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tmax, max2, SPACE2_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - - /* Change max dims to be equal to the dimensions */ - ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - rank = H5Sget_simple_extent_dims(sid1, tdims, tmax); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tmax, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* - * Check to be sure we can't create a simple dataspace that has too many - * dimensions. - */ - H5E_BEGIN_TRY - { - sid1 = H5Screate_simple(H5S_MAX_RANK + 1, dims3, NULL); - } - H5E_END_TRY - VERIFY(sid1, FAIL, "H5Screate_simple"); -#if 0 - /* - * Try reading a file that has been prepared that has a dataset with a - * higher dimensionality than what the library can handle. - * - * If this test fails and the H5S_MAX_RANK variable has changed, follow - * the instructions in space_overflow.c for regenerating the th5s.h5 file. 
- */ - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK_I(ret, "h5_driver_is_default_vfd_compatible"); - - if (driver_is_default_compatible) { - const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */ - - fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_I(fid1, "H5Fopen"); - if (fid1 >= 0) { - dset1 = H5Dopen2(fid1, "dset", H5P_DEFAULT); - VERIFY(dset1, FAIL, "H5Dopen2"); - ret = H5Fclose(fid1); - CHECK_I(ret, "H5Fclose"); - } - else - printf("***cannot open the pre-created H5S_MAX_RANK test file (%s)\n", testfile); - } -#endif - /* Verify that incorrect dimensions don't work */ - dims1[0] = H5S_UNLIMITED; - H5E_BEGIN_TRY - { - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - } - H5E_END_TRY - VERIFY(sid1, FAIL, "H5Screate_simple"); - - dims1[0] = H5S_UNLIMITED; - sid1 = H5Screate(H5S_SIMPLE); - CHECK(sid1, FAIL, "H5Screate"); - - H5E_BEGIN_TRY - { - ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sset_extent_simple"); - - ret = H5Sclose(sid1); - CHECK_I(ret, "H5Sclose"); - - /* - * Try writing simple dataspaces without setting their extents - */ - /* Create the file */ - fid1 = H5Fcreate(BASICFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - dims1[0] = SPACE1_DIM1; - - sid1 = H5Screate(H5S_SIMPLE); - CHECK(sid1, FAIL, "H5Screate"); - sid2 = H5Screate_simple(1, dims1, dims1); - CHECK(sid2, FAIL, "H5Screate"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) { - /* This dataset's space has no extent; it should not be created */ - H5E_BEGIN_TRY - { - dset1 = - H5Dcreate2(fid1, BASICDATASET, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(dset1, FAIL, "H5Dcreate2"); - } - - dset1 = H5Dcreate2(fid1, BASICDATASET2, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - - /* Try some writes with the bad dataspace (sid1) */ - H5E_BEGIN_TRY - { - ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, &n); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dwrite"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) { - H5E_BEGIN_TRY - { - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &n); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dwrite"); - - H5E_BEGIN_TRY - { - ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, sid1, H5P_DEFAULT, &n); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dwrite"); - } - - /* Try to iterate using the bad dataspace */ - H5E_BEGIN_TRY - { - ret = H5Diterate(&n, H5T_NATIVE_INT, sid1, NULL, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Diterate"); - - /* Try to fill using the bad dataspace */ - H5E_BEGIN_TRY - { - ret = H5Dfill(NULL, H5T_NATIVE_INT, &n, H5T_NATIVE_INT, sid1); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dfill"); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { - /* Now use the bad dataspace as the space for an attribute */ - H5E_BEGIN_TRY - { - aid1 = H5Acreate2(dset1, BASICATTR, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(aid1, FAIL, "H5Acreate2"); - } - - /* Make sure that dataspace reads using the bad dataspace fail */ - H5E_BEGIN_TRY - { - ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, &n); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dread"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) { - H5E_BEGIN_TRY - { - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, 
sid1, H5P_DEFAULT, &n); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dread"); - - H5E_BEGIN_TRY - { - ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, sid1, H5P_DEFAULT, &n); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dread"); - } - - /* Clean up */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_h5s_basic() */ - -/**************************************************************** -** -** test_h5s_null(): Test NULL dataspace -** -****************************************************************/ -static void -test_h5s_null(void) -{ - hid_t fid; /* File ID */ - hid_t sid; /* Dataspace IDs */ - hid_t dset_sid, dset_sid2; /* Dataspace IDs */ - hid_t attr_sid; /* Dataspace IDs */ - hid_t did; /* Dataset ID */ - hid_t attr; /*Attribute ID */ - H5S_class_t stype; /* dataspace type */ - hssize_t nelem; /* Number of elements */ - unsigned uval = 2; /* Buffer for writing to dataset */ - int val = 1; /* Buffer for writing to attribute */ - H5S_sel_type sel_type; /* Type of selection currently */ - hsize_t dims[1] = {10}; /* Dimensions for converting null dataspace to simple */ - H5S_class_t space_type; /* Type of dataspace */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Null Dataspace\n")); - - /* Create the file */ - fid = H5Fcreate(NULLFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - sid = H5Screate(H5S_NULL); - CHECK(sid, FAIL, "H5Screate"); - - /* Check that the null dataspace actually has 0 elements */ - nelem = H5Sget_simple_extent_npoints(sid); - VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); - - /* Check that the dataspace was created with an "all" selection */ - sel_type = H5Sget_select_type(sid); - VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); - - /* Check that the null dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - /* Change to "none" selection */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Check that the null dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - /* Check to be sure we can't set a hyperslab selection on a null dataspace */ - H5E_BEGIN_TRY - { - hsize_t start[1] = {0}; - hsize_t count[1] = {0}; - - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check to be sure we can't set a point selection on a null dataspace */ - H5E_BEGIN_TRY - { - hsize_t coord[1][1]; /* Coordinates for point selection */ - - coord[0][0] = 0; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sselect_elements"); - - /* Create first dataset */ - did = H5Dcreate2(fid, NULLDATASET, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Write "nothing" to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Write "nothing" to the dataset (with type conversion :-) */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret 
= H5Dread(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval); - CHECK(ret, FAIL, "H5Dread"); - VERIFY(uval, 2, "H5Dread"); - - /* Try reading from the dataset (with type conversion :-) (make certain our buffer is unmodified) */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val); - CHECK(ret, FAIL, "H5Dread"); - VERIFY(val, 1, "H5Dread"); - - /* Create an attribute for the group */ - attr = H5Acreate2(did, NULLATTR, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write "nothing" to the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_INT, &val); - CHECK(ret, FAIL, "H5Awrite"); - - /* Write "nothing" to the attribute (with type conversion :-) */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, &uval); - CHECK(ret, FAIL, "H5Awrite"); - - /* Try reading from the attribute (make certain our buffer is unmodified) */ - ret = H5Aread(attr, H5T_NATIVE_INT, &val); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(val, 1, "H5Aread"); - - /* Try reading from the attribute (with type conversion :-) (make certain our buffer is unmodified) */ - ret = H5Aread(attr, H5T_NATIVE_UINT, &uval); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(uval, 2, "H5Aread"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Verify that we've got the right kind of dataspace */ - space_type = H5Sget_simple_extent_type(sid); - VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type"); - - /* Convert the null dataspace to a simple dataspace */ - ret = H5Sset_extent_simple(sid, 1, dims, NULL); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - - /* Verify that we've got the right kind of dataspace now */ - space_type = H5Sget_simple_extent_type(sid); - VERIFY(space_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /*============================================ - * Reopen the file to check the dataspace - *============================================ - */ - fid = H5Fopen(NULLFILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Reopen the dataset */ - did = H5Dopen2(fid, NULLDATASET, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Get the space of the dataset */ - dset_sid = H5Dget_space(did); - CHECK(dset_sid, FAIL, "H5Dget_space"); - - /* Query the NULL dataspace */ - dset_sid2 = H5Scopy(dset_sid); - CHECK(dset_sid2, FAIL, "H5Scopy"); - - /* Verify the class type of dataspace */ - stype = H5Sget_simple_extent_type(dset_sid2); - VERIFY(stype, H5S_NULL, "H5Sget_simple_extent_type"); - - /* Verify there is zero element in the dataspace */ - ret = (herr_t)H5Sget_simple_extent_npoints(dset_sid2); - VERIFY(ret, 0, "H5Sget_simple_extent_npoints"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval); - CHECK(ret, FAIL, "H5Dread"); - VERIFY(uval, 2, "H5Dread"); - - /* Close the dataspace */ - ret = H5Sclose(dset_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(dset_sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Open the attribute for the dataset */ - attr = H5Aopen(did, NULLATTR, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Get the space of the dataset */ - attr_sid = H5Aget_space(attr); - CHECK(attr_sid, FAIL, "H5Aget_space"); - - /* Verify the class type of dataspace */ 
- stype = H5Sget_simple_extent_type(attr_sid); - VERIFY(stype, H5S_NULL, "H5Sget_simple_extent_type"); - - /* Verify there is zero element in the dataspace */ - ret = (herr_t)H5Sget_simple_extent_npoints(attr_sid); - VERIFY(ret, 0, "H5Sget_simple_extent_npoints"); - - /* Close the dataspace */ - ret = H5Sclose(attr_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Try reading from the attribute (make certain our buffer is unmodified) */ - ret = H5Aread(attr, H5T_NATIVE_INT, &val); - CHECK(ret, FAIL, "H5Aread"); - VERIFY(val, 1, "H5Aread"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_h5s_null() */ - -/**************************************************************** -** -** test_h5s_zero_dim(): Test the code for dataspace with zero dimension size -** -****************************************************************/ -static void -test_h5s_zero_dim(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t sid1, attr_sid; /* Dataspace ID */ - hid_t sid_chunk; /* Dataspace ID for chunked dataset */ - hid_t dset1; /* Dataset ID */ - hid_t plist_id; /* Dataset creation property list */ - hid_t attr; /* Attribute ID */ - int rank; /* Logical rank of dataspace */ - hsize_t dims1[] = {0, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t max_dims[] = {SPACE1_DIM1 + 1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t extend_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t chunk_dims[] = {SPACE1_DIM1, SPACE1_DIM2 / 3, SPACE1_DIM3}; - hsize_t tdims[SPACE1_RANK]; /* Dimension array to test with */ - int wdata[SPACE1_DIM2][SPACE1_DIM3]; - int rdata[SPACE1_DIM2][SPACE1_DIM3]; - short wdata_short[SPACE1_DIM2][SPACE1_DIM3]; - short rdata_short[SPACE1_DIM2][SPACE1_DIM3]; - int wdata_real[SPACE1_DIM1][SPACE1_DIM2][SPACE1_DIM3]; - int rdata_real[SPACE1_DIM1][SPACE1_DIM2][SPACE1_DIM3]; - int val = 3; - hsize_t start[] = {0, 0, 0}; - hsize_t count[] = {3, 15, 13}; - hsize_t coord[1][3]; /* Coordinates for point selection */ - hssize_t nelem; /* Number of elements */ - H5S_sel_type sel_type; /* Type of selection currently */ - H5S_class_t stype; /* dataspace type */ - H5D_alloc_time_t alloc_time; /* Space allocation time */ - herr_t ret; /* Generic return value */ - unsigned int i, j, k; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace with zero dimension size\n")); - - /* Initialize the data */ - for (i = 0; i < SPACE1_DIM2; i++) - for (j = 0; j < SPACE1_DIM3; j++) { - wdata[i][j] = (int)(i + j); - rdata[i][j] = 7; - wdata_short[i][j] = (short)(i + j); - rdata_short[i][j] = 7; - } - - for (i = 0; i < SPACE1_DIM1; i++) - for (j = 0; j < SPACE1_DIM2; j++) - for (k = 0; k < SPACE1_DIM3; k++) - wdata_real[i][j][k] = (int)(i + j + k); - - /* Test with different space allocation times */ - for (alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) { - - /* Make sure we can create the space with the dimension size 0 (starting from v1.8.7). - * The dimension doesn't need to be unlimited. */ - dims1[0] = 0; - dims1[1] = SPACE1_DIM2; - dims1[2] = SPACE1_DIM3; - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - sid1 = H5Screate(H5S_SIMPLE); - CHECK(sid1, FAIL, "H5Screate"); - - /* SID1 has the 1st dimension size as zero. 
The maximal dimension will be - * the same as the dimension because of the NULL passed in. */ - ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - - /* Check that the dataspace actually has 0 elements */ - nelem = H5Sget_simple_extent_npoints(sid1); - VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); - - /* Check that the dataspace was created with an "all" selection */ - sel_type = H5Sget_select_type(sid1); - VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); - - /* Check that the dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid1); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - /* Change to "none" selection */ - ret = H5Sselect_none(sid1); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Check that the dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid1); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - /* Try to select all dataspace */ - ret = H5Sselect_all(sid1); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Check that the dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid1); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - /* Create the dataspace for chunked dataset with the first dimension size as zero. - * The maximal dimensions are bigger than the dimensions for later expansion. */ - sid_chunk = H5Screate_simple(SPACE1_RANK, dims1, max_dims); - CHECK(sid_chunk, FAIL, "H5Screate_simple"); - - /*============================================ - * Make sure we can use 0-dimension to create - * contiguous, chunked, compact, and external - * datasets, and also attribute. - *============================================ - */ - fid1 = H5Fcreate(ZEROFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /*===================== Contiguous dataset =======================*/ - plist_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist_id, FAIL, "H5Pcreate"); - - ret = H5Pset_alloc_time(plist_id, alloc_time); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - dset1 = H5Dcreate2(fid1, BASICDATASET, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - - ret = H5Pclose(plist_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Write "nothing" to the dataset */ - ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - } - - /* Write "nothing" to the dataset (with type conversion :-) */ - ret = H5Dwrite(dset1, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_short); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata_short); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata_short[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]); - } 
- } - } - - if (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) { - /* Select a hyperslab beyond its current dimension sizes, then try to write - * the data. It should fail. */ - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - H5E_BEGIN_TRY - { - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, wdata); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dwrite"); - } - - /* Change to "none" selection */ - ret = H5Sselect_none(sid1); - CHECK(ret, FAIL, "H5Sselect_none"); - - if (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) { - /* Select a point beyond the dimension size, then try to write the data. - * It should fail. */ - coord[0][0] = 2; - coord[0][1] = 5; - coord[0][2] = 3; - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - H5E_BEGIN_TRY - { - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &val); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dwrite"); - } - - /* Restore the selection to all */ - ret = H5Sselect_all(sid1); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /*=================== Chunked dataset ====================*/ - plist_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist_id, FAIL, "H5Pcreate"); - - ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* ret = H5Pset_alloc_time(plist_id, alloc_time); */ - /* CHECK(ret, FAIL, "H5Pset_alloc_time"); */ - - dset1 = - H5Dcreate2(fid1, BASICDATASET1, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - - /* Write "nothing" to the dataset */ - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - - /* Now extend the dataset to SPACE1_DIM1*SPACE1_DIM2*SPACE1_DIM3 and make sure - * we can write data to it */ - extend_dims[0] = SPACE1_DIM1; - ret = H5Dset_extent(dset1, extend_dims); - CHECK(ret, FAIL, "H5Dset_extent"); - - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_real); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_real); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM1; i++) { - for (j = 0; j < SPACE1_DIM2; j++) { - for (k = 0; k < SPACE1_DIM3; k++) { - if (rdata_real[i][j][k] != wdata_real[i][j][k]) { - H5_FAILED(); - printf("element [%d][%d][%d] is %d but should have been %d\n", i, j, k, - rdata_real[i][j][k], wdata_real[i][j][k]); - } - } - } - } - - /* Now shrink the first dimension size of the dataset to 0 and make sure no data is in it */ - extend_dims[0] = 0; - ret = H5Dset_extent(dset1, extend_dims); - CHECK(ret, FAIL, "H5Dset_extent"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the dataset (make certain our 
buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - if ((vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - /* Now extend the first dimension size of the dataset to SPACE1_DIM1*3 past the maximal size. - * It is supposed to fail. */ - extend_dims[0] = SPACE1_DIM1 * 3; - H5E_BEGIN_TRY - { - ret = H5Dset_extent(dset1, extend_dims); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dset_extent"); - } - ret = H5Pclose(plist_id); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /*=================== Compact dataset =====================*/ - plist_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist_id, FAIL, "H5Pcreate"); - - ret = H5Pset_layout(plist_id, H5D_COMPACT); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Don't set the allocation time for compact storage datasets (must be early) */ - - dset1 = H5Dcreate2(fid1, BASICDATASET2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - - /* Write "nothing" to the dataset */ - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - - ret = H5Pclose(plist_id); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /*=========== Contiguous dataset with external storage ============*/ - plist_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist_id, FAIL, "H5Pcreate"); - - /* Change the DCPL for contiguous layout with external storage. The size of the reserved - * space in the external file is the size of the dataset (zero because one dimension size is zero). - * There's no need to clean up the external file since the library doesn't create it - * until the data is written to it. 
*/ - ret = H5Pset_external(plist_id, EXTFILE_NAME, (off_t)0, (hsize_t)0); - CHECK(ret, FAIL, "H5Pset_external"); - - ret = H5Pset_alloc_time(plist_id, alloc_time); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - dset1 = H5Dcreate2(fid1, BASICDATASET3, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - - /* Write "nothing" to the dataset */ - ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - } - - ret = H5Pclose(plist_id); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /*=============== Create an attribute for the file ================*/ - attr = H5Acreate2(fid1, NULLATTR, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Acreate2"); - - /* Write "nothing" to the attribute */ - ret = H5Awrite(attr, H5T_NATIVE_INT, wdata); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the attribute (make certain our buffer is unmodified) */ - ret = H5Aread(attr, H5T_NATIVE_INT, rdata); - CHECK(ret, FAIL, "H5Aread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - } - - /* Write "nothing" to the attribute (with type conversion :-) */ - ret = H5Awrite(attr, H5T_NATIVE_SHORT, wdata_short); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Try reading from the attribute (with type conversion :-) (make certain our buffer is unmodified) */ - ret = H5Aread(attr, H5T_NATIVE_SHORT, rdata_short); - CHECK(ret, FAIL, "H5Aread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata_short[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]); - } - } - } - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /*=============================================================== - * Extend the dimension to make it a normal dataspace (3x15x13). - * Verify that data can be written to and read from the chunked - * dataset now. 
- *=============================================================== - */ - dims1[0] = SPACE1_DIM1; - ret = H5Sset_extent_simple(sid_chunk, SPACE1_RANK, dims1, max_dims); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - - nelem = H5Sget_simple_extent_npoints(sid_chunk); - CHECK(nelem, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(nelem, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); - - rank = H5Sget_simple_extent_ndims(sid_chunk); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); - - rank = H5Sget_simple_extent_dims(sid_chunk, tdims, NULL); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - - /* Set it to chunked dataset */ - plist_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist_id, FAIL, "H5Pcreate"); - - ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - ret = H5Pset_alloc_time(plist_id, alloc_time); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - dset1 = - H5Dcreate2(fid1, BASICDATASET4, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_real); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_real); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM1; i++) { - for (j = 0; j < SPACE1_DIM2; j++) { - for (k = 0; k < SPACE1_DIM3; k++) { - if (rdata_real[i][j][k] != wdata_real[i][j][k]) { - H5_FAILED(); - printf("element [%d][%d][%d] is %d but should have been %d\n", i, j, k, - rdata_real[i][j][k], wdata_real[i][j][k]); - } - } - } - } - - ret = H5Pclose(plist_id); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Change the dimensions to make them zero size again (0x0x0). Verify that - * no element is in the dataspace. 
*/ - dims1[0] = dims1[1] = dims1[2] = 0; - ret = H5Sset_extent_simple(sid_chunk, SPACE1_RANK, dims1, NULL); - CHECK(ret, FAIL, "H5Sset_extent_simple"); - - /* Check that the dataspace actually has 0 elements */ - nelem = H5Sget_simple_extent_npoints(sid_chunk); - VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); - - /* Check that the dataspace was created with an "all" selection */ - sel_type = H5Sget_select_type(sid_chunk); - VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); - - /* Check that the dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid_chunk); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - /* Change to "none" selection */ - ret = H5Sselect_none(sid_chunk); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Check that the dataspace has 0 elements selected */ - nelem = H5Sget_select_npoints(sid_chunk); - VERIFY(nelem, 0, "H5Sget_select_npoints"); - - ret = H5Sclose(sid_chunk); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /*============================================ - * Reopen the file to check the dataspace - *============================================ - */ - fid1 = H5Fopen(ZEROFILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Reopen the chunked dataset */ - dset1 = H5Dopen2(fid1, BASICDATASET1, H5P_DEFAULT); - CHECK(dset1, FAIL, "H5Dopen2"); - - /* Get the space of the dataset and query it */ - sid1 = H5Dget_space(dset1); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Verify the class type of dataspace */ - stype = H5Sget_simple_extent_type(sid1); - VERIFY(stype, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Verify there is zero element in the dataspace */ - nelem = H5Sget_simple_extent_npoints(sid1); - VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); - - /* Verify the dimension sizes are correct */ - rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(tdims[0], 0, "H5Sget_simple_extent_dims"); - VERIFY(tdims[1], SPACE1_DIM2, "H5Sget_simple_extent_dims"); - VERIFY(tdims[2], SPACE1_DIM3, "H5Sget_simple_extent_dims"); - - /* Try reading from the dataset (make certain our buffer is unmodified) */ - ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < SPACE1_DIM3; j++) { - if (rdata[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]); - } - } - } - - /* Close the dataset and its dataspace */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Open the attribute for the file */ - attr = H5Aopen(fid1, NULLATTR, H5P_DEFAULT); - CHECK(attr, FAIL, "H5Aopen"); - - /* Get the space of the dataset */ - attr_sid = H5Aget_space(attr); - CHECK(attr_sid, FAIL, "H5Aget_space"); - - /* Verify the class type of dataspace */ - stype = H5Sget_simple_extent_type(attr_sid); - VERIFY(stype, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Verify there is zero element in the dataspace */ - nelem = H5Sget_simple_extent_npoints(attr_sid); - VERIFY(nelem, 0, "H5Sget_simple_extent_npoints"); - - /* Try reading from the attribute (make certain our buffer is unmodified) */ - ret = H5Aread(attr, H5T_NATIVE_SHORT, rdata_short); - CHECK(ret, FAIL, "H5Aread"); - - /* Check results */ - for (i = 0; i < SPACE1_DIM2; i++) { - for (j = 0; j < 
SPACE1_DIM3; j++) { - if (rdata_short[i][j] != 7) { - H5_FAILED(); - printf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]); - } - } - } - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close the dataspace */ - ret = H5Sclose(attr_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ -} /* test_h5s_zero_dim() */ - -/**************************************************************** -** -** test_h5s_encode(): Test H5S (dataspace) encoding and decoding. -** -** Note: See "RFC: H5Sencode/H5Sdecode Format Change". -** -****************************************************************/ -static void -test_h5s_encode(H5F_libver_t low, H5F_libver_t high) -{ - hid_t sid1, sid2, sid3; /* Dataspace ID */ - hid_t decoded_sid1, decoded_sid2, decoded_sid3; - int rank; /* Logical rank of dataspace */ - hid_t fapl = -1; /* File access property list ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - size_t sbuf_size = 0, null_size = 0, scalar_size = 0; - unsigned char *sbuf = NULL, *null_sbuf = NULL, *scalar_buf = NULL; - hsize_t tdims[4]; /* Dimension array to test with */ - hssize_t n; /* Number of dataspace elements */ - hsize_t start[] = {0, 0, 0}; - hsize_t stride[] = {2, 5, 3}; - hsize_t count[] = {2, 2, 2}; - hsize_t block[] = {1, 3, 1}; - H5S_sel_type sel_type; - H5S_class_t space_type; - hssize_t nblocks; - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace Encoding and Decoding\n")); - - /*------------------------------------------------------------------------- - * Test encoding and decoding of simple dataspace and hyperslab selection. 
- *------------------------------------------------------------------------- - */ - - /* Create the file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set low/high bounds in the fapl */ - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the dataspace */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Encode simple dataspace in a buffer with the fapl setting */ - ret = H5Sencode2(sid1, NULL, &sbuf_size, fapl); - CHECK(ret, FAIL, "H5Sencode2"); - - if (sbuf_size > 0) { - sbuf = (unsigned char *)calloc((size_t)1, sbuf_size); - CHECK_PTR(sbuf, "calloc"); - } - - /* Try decoding bogus buffer */ - H5E_BEGIN_TRY - { - ret_id = H5Sdecode(sbuf); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Sdecode"); - - /* Encode the simple dataspace in a buffer with the fapl setting */ - ret = H5Sencode2(sid1, sbuf, &sbuf_size, fapl); - CHECK(ret, FAIL, "H5Sencode"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid1 = H5Sdecode(sbuf); - CHECK(decoded_sid1, FAIL, "H5Sdecode"); - - /* Verify the decoded dataspace */ - n = H5Sget_simple_extent_npoints(decoded_sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); - - /* Retrieve and verify the dataspace rank */ - rank = H5Sget_simple_extent_ndims(decoded_sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); - - /* Retrieve and verify the dataspace dimensions */ - rank = H5Sget_simple_extent_dims(decoded_sid1, tdims, NULL); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - - /* Verify the type of dataspace selection */ - sel_type = H5Sget_select_type(decoded_sid1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify the number of hyperslab blocks */ - nblocks = H5Sget_select_hyper_nblocks(decoded_sid1); - VERIFY(nblocks, 2 * 2 * 2, "H5Sget_select_hyper_nblocks"); - - /* Close the dataspaces */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(decoded_sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /*------------------------------------------------------------------------- - * Test encoding and decoding of null dataspace. 
- *------------------------------------------------------------------------- - */ - sid2 = H5Screate(H5S_NULL); - CHECK(sid2, FAIL, "H5Screate"); - - /* Encode null dataspace in a buffer */ - ret = H5Sencode2(sid2, NULL, &null_size, fapl); - CHECK(ret, FAIL, "H5Sencode"); - - if (null_size > 0) { - null_sbuf = (unsigned char *)calloc((size_t)1, null_size); - CHECK_PTR(null_sbuf, "calloc"); - } - - /* Encode the null dataspace in the buffer */ - ret = H5Sencode2(sid2, null_sbuf, &null_size, fapl); - CHECK(ret, FAIL, "H5Sencode2"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid2 = H5Sdecode(null_sbuf); - CHECK(decoded_sid2, FAIL, "H5Sdecode"); - - /* Verify the decoded dataspace type */ - space_type = H5Sget_simple_extent_type(decoded_sid2); - VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type"); - - /* Close the dataspaces */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(decoded_sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /*------------------------------------------------------------------------- - * Test encoding and decoding of scalar dataspace. - *------------------------------------------------------------------------- - */ - /* Create scalar dataspace */ - sid3 = H5Screate(H5S_SCALAR); - CHECK(sid3, FAIL, "H5Screate_simple"); - - /* Encode scalar dataspace in a buffer */ - ret = H5Sencode2(sid3, NULL, &scalar_size, fapl); - CHECK(ret, FAIL, "H5Sencode"); - - if (scalar_size > 0) { - scalar_buf = (unsigned char *)calloc((size_t)1, scalar_size); - CHECK_PTR(scalar_buf, "calloc"); - } - - /* Encode the scalar dataspace in the buffer */ - ret = H5Sencode2(sid3, scalar_buf, &scalar_size, fapl); - CHECK(ret, FAIL, "H5Sencode2"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid3 = H5Sdecode(scalar_buf); - CHECK(decoded_sid3, FAIL, "H5Sdecode"); - - /* Verify extent type */ - space_type = H5Sget_simple_extent_type(decoded_sid3); - VERIFY(space_type, H5S_SCALAR, "H5Sget_simple_extent_type"); - - /* Verify decoded dataspace */ - n = H5Sget_simple_extent_npoints(decoded_sid3); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, 1, "H5Sget_simple_extent_npoints"); - - /* Retrieve and verify the dataspace rank */ - rank = H5Sget_simple_extent_ndims(decoded_sid3); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, 0, "H5Sget_simple_extent_ndims"); - - /* Close the dataspaces */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(decoded_sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Release resources */ - if (sbuf) - free(sbuf); - if (null_sbuf) - free(null_sbuf); - if (scalar_buf) - free(scalar_buf); -} /* test_h5s_encode() */ - -#ifndef H5_NO_DEPRECATED_SYMBOLS - -/**************************************************************** -** -** test_h5s_encode(): Test H5S (dataspace) encoding and decoding. 
-** -****************************************************************/ -static void -test_h5s_encode1(void) -{ - hid_t sid1, sid2, sid3; /* Dataspace ID */ - hid_t decoded_sid1, decoded_sid2, decoded_sid3; - int rank; /* Logical rank of dataspace */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - size_t sbuf_size = 0, null_size = 0, scalar_size = 0; - unsigned char *sbuf = NULL, *null_sbuf = NULL, *scalar_buf = NULL; - hsize_t tdims[4]; /* Dimension array to test with */ - hssize_t n; /* Number of dataspace elements */ - hsize_t start[] = {0, 0, 0}; - hsize_t stride[] = {2, 5, 3}; - hsize_t count[] = {2, 2, 2}; - hsize_t block[] = {1, 3, 1}; - H5S_sel_type sel_type; - H5S_class_t space_type; - hssize_t nblocks; - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace Encoding (H5Sencode1) and Decoding\n")); - - /*------------------------------------------------------------------------- - * Test encoding and decoding of simple dataspace and hyperslab selection. - *------------------------------------------------------------------------- - */ - /* Create the dataspace */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Encode simple dataspace in a buffer with the fapl setting */ - ret = H5Sencode1(sid1, NULL, &sbuf_size); - CHECK(ret, FAIL, "H5Sencode2"); - - if (sbuf_size > 0) { - sbuf = (unsigned char *)calloc((size_t)1, sbuf_size); - CHECK_PTR(sbuf, "calloc"); - } - - /* Try decoding bogus buffer */ - H5E_BEGIN_TRY - { - ret_id = H5Sdecode(sbuf); - } - H5E_END_TRY - VERIFY(ret_id, FAIL, "H5Sdecode"); - - /* Encode the simple dataspace in a buffer */ - ret = H5Sencode1(sid1, sbuf, &sbuf_size); - CHECK(ret, FAIL, "H5Sencode"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid1 = H5Sdecode(sbuf); - CHECK(decoded_sid1, FAIL, "H5Sdecode"); - - /* Verify the decoded dataspace */ - n = H5Sget_simple_extent_npoints(decoded_sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints"); - - /* Retrieve and verify the dataspace rank */ - rank = H5Sget_simple_extent_ndims(decoded_sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims"); - - /* Retrieve and verify the dataspace dimensions */ - rank = H5Sget_simple_extent_dims(decoded_sid1, tdims, NULL); - CHECK(rank, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(memcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims"); - - /* Verify the type of dataspace selection */ - sel_type = H5Sget_select_type(decoded_sid1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify the number of hyperslab blocks */ - nblocks = H5Sget_select_hyper_nblocks(decoded_sid1); - VERIFY(nblocks, 2 * 2 * 2, "H5Sget_select_hyper_nblocks"); - - /* Close the dataspaces */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(decoded_sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /*------------------------------------------------------------------------- - * Test encoding and decoding of null dataspace. 
- *------------------------------------------------------------------------- - */ - sid2 = H5Screate(H5S_NULL); - CHECK(sid2, FAIL, "H5Screate"); - - /* Encode null dataspace in a buffer */ - ret = H5Sencode1(sid2, NULL, &null_size); - CHECK(ret, FAIL, "H5Sencode"); - - if (null_size > 0) { - null_sbuf = (unsigned char *)calloc((size_t)1, null_size); - CHECK_PTR(null_sbuf, "calloc"); - } - - /* Encode the null dataspace in the buffer */ - ret = H5Sencode1(sid2, null_sbuf, &null_size); - CHECK(ret, FAIL, "H5Sencode2"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid2 = H5Sdecode(null_sbuf); - CHECK(decoded_sid2, FAIL, "H5Sdecode"); - - /* Verify the decoded dataspace type */ - space_type = H5Sget_simple_extent_type(decoded_sid2); - VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type"); - - /* Close the dataspaces */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(decoded_sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /*------------------------------------------------------------------------- - * Test encoding and decoding of scalar dataspace. - *------------------------------------------------------------------------- - */ - /* Create scalar dataspace */ - sid3 = H5Screate(H5S_SCALAR); - CHECK(sid3, FAIL, "H5Screate"); - - /* Encode scalar dataspace in a buffer */ - ret = H5Sencode1(sid3, NULL, &scalar_size); - CHECK(ret, FAIL, "H5Sencode"); - - if (scalar_size > 0) { - scalar_buf = (unsigned char *)calloc((size_t)1, scalar_size); - CHECK_PTR(scalar_buf, "calloc"); - } - - /* Encode the scalar dataspace in the buffer */ - ret = H5Sencode1(sid3, scalar_buf, &scalar_size); - CHECK(ret, FAIL, "H5Sencode2"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid3 = H5Sdecode(scalar_buf); - CHECK(decoded_sid3, FAIL, "H5Sdecode"); - - /* Verify extent type */ - space_type = H5Sget_simple_extent_type(decoded_sid3); - VERIFY(space_type, H5S_SCALAR, "H5Sget_simple_extent_type"); - - /* Verify decoded dataspace */ - n = H5Sget_simple_extent_npoints(decoded_sid3); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, 1, "H5Sget_simple_extent_npoints"); - - /* Retrieve and verify the dataspace rank */ - rank = H5Sget_simple_extent_ndims(decoded_sid3); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, 0, "H5Sget_simple_extent_ndims"); - - /* Close the dataspaces */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(decoded_sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Release resources */ - if (sbuf) - free(sbuf); - if (null_sbuf) - free(null_sbuf); - if (scalar_buf) - free(scalar_buf); -} /* test_h5s_encode1() */ - -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - -/**************************************************************** -** -** test_h5s_check_encoding(): -** This is the helper routine to verify that H5Sencode2() -** works as specified in the RFC for the library format setting -** in the file access property list. -** See "RFC: H5Sencode/H5Sdeocde Format Change". 
-** -** This routine is used by: -** test_h5s_encode_regular_hyper() -** test_h5s_encode_irregular_hyper() -** test_h5s_encode_points() -** -****************************************************************/ -static herr_t -test_h5s_check_encoding(hid_t in_fapl, hid_t in_sid, uint32_t expected_version, uint8_t expected_enc_size, - bool expected_to_fail) -{ - char *buf = NULL; /* Pointer to the encoded buffer */ - size_t buf_size; /* Size of the encoded buffer */ - hid_t d_sid = -1; /* The decoded dataspace ID */ - htri_t check; - hsize_t in_low_bounds[1]; /* The low bounds for the selection for in_sid */ - hsize_t in_high_bounds[1]; /* The high bounds for the selection for in_sid */ - hsize_t d_low_bounds[1]; /* The low bounds for the selection for d_sid */ - hsize_t d_high_bounds[1]; /* The high bounds for the selection for d_sid */ - herr_t ret; /* Return value */ - - /* Get buffer size for encoding with the format setting in in_fapl */ - H5E_BEGIN_TRY - { - ret = H5Sencode2(in_sid, NULL, &buf_size, in_fapl); - } - H5E_END_TRY - - if (expected_to_fail) { - VERIFY(ret, FAIL, "H5Screate_simple"); - } - else { - - CHECK(ret, FAIL, "H5Sencode2"); - - /* Allocate the buffer for encoding */ - buf = (char *)malloc(buf_size); - CHECK_PTR(buf, "malloc"); - - /* Encode according to the setting in in_fapl */ - ret = H5Sencode2(in_sid, buf, &buf_size, in_fapl); - CHECK(ret, FAIL, "H5Sencode2"); - - /* Decode the buffer */ - d_sid = H5Sdecode(buf); - CHECK(d_sid, FAIL, "H5Sdecode"); - - /* Verify the number of selected points for in_sid and d_sid */ - VERIFY(H5Sget_select_npoints(in_sid), H5Sget_select_npoints(d_sid), "Compare npoints"); - - /* Verify if the two dataspace selections (in_sid, d_sid) are the same shape */ - check = H5Sselect_shape_same(in_sid, d_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare the starting/ending coordinates of the bounding box for in_sid and d_sid */ - ret = H5Sget_select_bounds(in_sid, in_low_bounds, in_high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - ret = H5Sget_select_bounds(d_sid, d_low_bounds, d_high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(in_low_bounds[0], d_low_bounds[0], "Compare selection low bounds"); - VERIFY(in_high_bounds[0], d_high_bounds[0], "Compare selection high bounds"); - - /* - * See "RFC: H5Sencode/H5Sdeocde Format Change" for the verification of: - * H5S_SEL_POINTS: - * --the expected version for point selection info - * --the expected encoded size (version 2 points selection info) - * H5S_SEL_HYPERSLABS: - * --the expected version for hyperslab selection info - * --the expected encoded size (version 3 hyperslab selection info) - */ - - if (H5Sget_select_type(in_sid) == H5S_SEL_POINTS) { - - /* Verify the version */ - VERIFY((uint32_t)buf[35], expected_version, "Version for point selection"); - - /* Verify the encoded size for version 2 */ - if (expected_version == 2) - VERIFY((uint8_t)buf[39], expected_enc_size, "Encoded size of point selection info"); - } - - if (H5Sget_select_type(in_sid) == H5S_SEL_HYPERSLABS) { - - /* Verify the version */ - VERIFY((uint32_t)buf[35], expected_version, "Version for hyperslab selection info"); - - /* Verify the encoded size for version 3 */ - if (expected_version == 3) - VERIFY((uint8_t)buf[40], expected_enc_size, "Encoded size of selection info"); - - } /* hyperslab selection */ - - ret = H5Sclose(d_sid); - CHECK(ret, FAIL, "H5Sclose"); - if (buf) - free(buf); - } - - return (0); - -} /* test_h5s_check_encoding */ - 
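For reference, below is a minimal standalone sketch of the H5Sencode2()/H5Sdecode() round trip that test_h5s_check_encoding() verifies, assuming an HDF5 1.12+ build (H5Sencode2 requires it). The dimensions, hyperslab values, and libver bounds are illustrative only, and error checking is omitted for brevity; it is not taken from the patch itself.

/* Round-trip sketch: query the encoded size, encode with a fapl whose
 * low/high libver bounds control the selection-info version, then decode
 * and compare the selections -- the same pattern test_h5s_check_encoding()
 * exercises with specific version/size expectations. */
#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"

int
main(void)
{
    hsize_t        dims[1] = {1000};   /* illustrative extent */
    hsize_t        start = 0, stride = 10, count = 5, block = 4;
    size_t         buf_size = 0;
    unsigned char *buf = NULL;
    hid_t          fapl, sid, decoded_sid;

    /* The fapl's low/high bounds decide which selection-info version
     * H5Sencode2() is allowed to write */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_libver_bounds(fapl, H5F_LIBVER_V112, H5F_LIBVER_LATEST);

    /* Dataspace with a regular hyperslab selection */
    sid = H5Screate_simple(1, dims, NULL);
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);

    /* First call with a NULL buffer only reports the required size... */
    H5Sencode2(sid, NULL, &buf_size, fapl);
    buf = malloc(buf_size);

    /* ...second call does the actual encoding into the buffer */
    H5Sencode2(sid, buf, &buf_size, fapl);

    /* Decoding yields a new dataspace with the same extent and selection */
    decoded_sid = H5Sdecode(buf);

    printf("npoints: original=%lld decoded=%lld, same shape=%d\n",
           (long long)H5Sget_select_npoints(sid),
           (long long)H5Sget_select_npoints(decoded_sid),
           (int)H5Sselect_shape_same(sid, decoded_sid));

    free(buf);
    H5Sclose(decoded_sid);
    H5Sclose(sid);
    H5Pclose(fapl);
    return 0;
}

The two-call sizing idiom is why each test below first calls the encode routine with a NULL buffer before allocating, and the fapl's libver bounds are what move the expected selection-info version between 1, 2, and 3 in the checks that follow.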
-/**************************************************************** -** -** test_h5s_encode_regular_hyper(): -** This test verifies that H5Sencode2() works as specified in -** the RFC for regular hyperslabs. -** See "RFC: H5Sencode/H5Sdeocde Format Change". -** -****************************************************************/ -static void -test_h5s_encode_regular_hyper(H5F_libver_t low, H5F_libver_t high) -{ - hid_t fapl = -1; /* File access property list ID */ - hid_t sid = -1; /* Dataspace ID */ - hsize_t numparticles = 8388608; /* Used to calculate dimension size */ - unsigned num_dsets = 513; /* Used to calculate dimension size */ - hsize_t total_particles = numparticles * num_dsets; - hsize_t vdsdims[1] = {total_particles}; /* Dimension size */ - hsize_t start, stride, count, block; /* Selection info */ - unsigned config; /* Testing configuration */ - unsigned unlim; /* H5S_UNLIMITED setting or not */ - herr_t ret; /* Generic return value */ - uint32_t expected_version = 0; /* Expected version for selection info */ - uint8_t expected_enc_size = 0; /* Expected encoded size for selection info */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace encoding of regular hyperslabs\n")); - - /* Create the file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set the low/high bounds in the fapl */ - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the dataspace */ - sid = H5Screate_simple(1, vdsdims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Testing with each configuration */ - for (config = CONFIG_16; config <= CONFIG_32; config++) { - bool expected_to_fail = false; - - /* Testing with unlimited or not */ - for (unlim = 0; unlim <= 1; unlim++) { - start = 0; - count = unlim ? H5S_UNLIMITED : 2; - - if ((high <= H5F_LIBVER_V18) && (unlim || config == CONFIG_32)) - expected_to_fail = true; - - if (low >= H5F_LIBVER_V112) - expected_version = 3; - else if (config == CONFIG_16 && !unlim) - expected_version = 1; - else - expected_version = 2; - - /* test 1 */ - switch (config) { - case CONFIG_16: - stride = POWER16 - 1; - block = 4; - expected_enc_size = (uint8_t)(expected_version == 3 ? 2 : 4); - break; - case CONFIG_32: - stride = POWER32 - 1; - block = 4; - expected_enc_size = (uint8_t)(expected_version == 3 ? 4 : 8); - - break; - default: - assert(0); - break; - } /* end switch */ - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Verify the version and encoded size expected for this configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* test 2 */ - switch (config) { - case CONFIG_16: - stride = POWER16 - 1; - block = POWER16 - 2; - expected_enc_size = (uint8_t)(expected_version == 3 ? 2 : 4); - break; - case CONFIG_32: - stride = POWER32 - 1; - block = POWER32 - 2; - expected_enc_size = (uint8_t)(expected_version == 3 ? 
4 : 8); - break; - default: - assert(0); - break; - } /* end switch */ - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Verify the version and encoded size for this configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* test 3 */ - switch (config) { - case CONFIG_16: - stride = POWER16 - 1; - block = POWER16 - 1; - expected_enc_size = 4; - break; - case CONFIG_32: - stride = POWER32 - 1; - block = POWER32 - 1; - expected_enc_size = 8; - break; - default: - assert(0); - break; - } - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Verify the version and encoded size expected for this configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* test 4 */ - switch (config) { - case CONFIG_16: - stride = POWER16; - block = POWER16 - 2; - expected_enc_size = 4; - break; - case CONFIG_32: - stride = POWER32; - block = POWER32 - 2; - expected_enc_size = 8; - break; - default: - assert(0); - break; - } /* end switch */ - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Verify the version and encoded size expected for this configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* test 5 */ - switch (config) { - case CONFIG_16: - stride = POWER16; - block = 1; - expected_enc_size = 4; - break; - case CONFIG_32: - stride = POWER32; - block = 1; - expected_enc_size = 8; - break; - default: - assert(0); - break; - } - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Verify the version and encoded size expected for this configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - } /* for unlim */ - } /* for config */ - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* test_h5s_encode_regular_hyper() */ - -/**************************************************************** -** -** test_h5s_encode_irregular_hyper(): -** This test verifies that H5Sencode2() works as specified in -** the RFC for irregular hyperslabs. -** See "RFC: H5Sencode/H5Sdeocde Format Change". 
-** -****************************************************************/ -static void -test_h5s_encode_irregular_hyper(H5F_libver_t low, H5F_libver_t high) -{ - hid_t fapl = -1; /* File access property list ID */ - hid_t sid; /* Dataspace ID */ - hsize_t numparticles = 8388608; /* Used to calculate dimension size */ - unsigned num_dsets = 513; /* Used to calculate dimension size */ - hsize_t total_particles = numparticles * num_dsets; - hsize_t vdsdims[1] = {total_particles}; /* Dimension size */ - hsize_t start, stride, count, block; /* Selection info */ - htri_t is_regular; /* Is this a regular hyperslab */ - unsigned config; /* Testing configuration */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace encoding of irregular hyperslabs\n")); - - /* Create the file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set the low/high bounds in the fapl */ - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the dataspace */ - sid = H5Screate_simple(1, vdsdims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Testing with each configuration */ - for (config = CONFIG_8; config <= CONFIG_32; config++) { - bool expected_to_fail = false; /* Whether H5Sencode2 is expected to fail */ - uint32_t expected_version = 0; /* Expected version for selection info */ - uint32_t expected_enc_size = 0; /* Expected encoded size for selection info */ - - start = 0; - count = 2; - block = 4; - - /* H5Sencode2 is expected to fail for library v110 and below - when the selection exceeds the 32 bits integer limit */ - if (high <= H5F_LIBVER_V110 && config == CONFIG_32) - expected_to_fail = true; - - if (low >= H5F_LIBVER_V112 || config == CONFIG_32) - expected_version = 3; - else - expected_version = 1; - - switch (config) { - case CONFIG_8: - stride = POWER8 - 2; - break; - - case CONFIG_16: - stride = POWER16 - 2; - break; - - case CONFIG_32: - stride = POWER32 - 2; - break; - - default: - assert(0); - break; - } - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start = 8; - count = 5; - block = 2; - - switch (config) { - case CONFIG_8: - stride = POWER8; - expected_enc_size = expected_version == 3 ? 2 : 4; - break; - - case CONFIG_16: - stride = POWER16; - expected_enc_size = 4; - break; - - case CONFIG_32: - stride = POWER32; - expected_enc_size = 8; - break; - - default: - assert(0); - break; - } - - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Should be irregular hyperslab */ - is_regular = H5Sis_regular_hyperslab(sid); - VERIFY(is_regular, false, "H5Sis_regular_hyperslab"); - - /* Verify the version and encoded size expected for the configuration */ - assert(expected_enc_size <= 255); - ret = test_h5s_check_encoding(fapl, sid, expected_version, (uint8_t)expected_enc_size, - expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - } /* for config */ - - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* test_h5s_encode_irregular_hyper() */ - -/**************************************************************** -** -** test_h5s_encode_points(): -** This test verifies that H5Sencode2() works as specified in -** the RFC for point selection. 
-** See "RFC: H5Sencode/H5Sdeocde Format Change". -** -****************************************************************/ -static void -test_h5s_encode_points(H5F_libver_t low, H5F_libver_t high) -{ - hid_t fapl = -1; /* File access property list ID */ - hid_t sid; /* Dataspace ID */ - hsize_t numparticles = 8388608; /* Used to calculate dimension size */ - unsigned num_dsets = 513; /* used to calculate dimension size */ - hsize_t total_particles = numparticles * num_dsets; - hsize_t vdsdims[1] = {total_particles}; /* Dimension size */ - hsize_t coord[4]; /* The point coordinates */ - herr_t ret; /* Generic return value */ - bool expected_to_fail = false; /* Expected to fail or not */ - uint32_t expected_version = 0; /* Expected version for selection info */ - uint8_t expected_enc_size = 0; /* Expected encoded size of selection info */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspace encoding of points selection\n")); - - /* Create the file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Set the low/high bounds in the fapl */ - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the dataspace */ - sid = H5Screate_simple(1, vdsdims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* test 1 */ - coord[0] = 5; - coord[1] = 15; - coord[2] = POWER16; - coord[3] = 19; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - expected_to_fail = false; - expected_enc_size = 4; - expected_version = 1; - - if (low >= H5F_LIBVER_V112) - expected_version = 2; - - /* Verify the version and encoded size expected for the configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* test 2 */ - coord[0] = 5; - coord[1] = 15; - coord[2] = POWER32 - 1; - coord[3] = 19; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Expected result same as test 1 */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* test 3 */ - if (high <= H5F_LIBVER_V110) - expected_to_fail = true; - - if (high >= H5F_LIBVER_V112) { - expected_version = 2; - expected_enc_size = 8; - } - - coord[0] = 5; - coord[1] = 15; - coord[2] = POWER32 + 1; - coord[3] = 19; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify the version and encoded size expected for the configuration */ - ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail); - CHECK(ret, FAIL, "test_h5s_check_encoding"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* test_h5s_encode_points() */ - -/**************************************************************** -** -** test_h5s_encode_length(): -** Test to verify HDFFV-10271 is fixed. -** Verify that version 2 hyperslab encoding length is correct. -** -** See "RFC: H5Sencode/H5Sdecode Format Change" for the -** description of the encoding format. 
-** -****************************************************************/ -static void -test_h5s_encode_length(void) -{ - hid_t sid; /* Dataspace ID */ - hid_t decoded_sid; /* Dataspace ID from H5Sdecode2 */ - size_t sbuf_size = 0; /* Buffer size for H5Sencode2/1 */ - unsigned char *sbuf = NULL; /* Buffer for H5Sencode2/1 */ - hsize_t dims[1] = {500}; /* Dimension size */ - hsize_t start, count, block, stride; /* Hyperslab selection specifications */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Version 2 Hyperslab Encoding Length is correct\n")); - - /* Create dataspace */ - sid = H5Screate_simple(1, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Setting H5S_UNLIMITED in count will use version 2 for hyperslab encoding */ - start = 0; - stride = 10; - block = 4; - count = H5S_UNLIMITED; - - /* Set hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Encode simple dataspace in a buffer */ - ret = H5Sencode2(sid, NULL, &sbuf_size, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Sencode"); - - /* Allocate the buffer */ - if (sbuf_size > 0) { - sbuf = (unsigned char *)calloc((size_t)1, sbuf_size); - CHECK_PTR(sbuf, "H5Sencode2"); - } - - /* Encode the dataspace */ - ret = H5Sencode2(sid, sbuf, &sbuf_size, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Sencode"); - - /* Verify that length stored at this location in the buffer is correct */ - VERIFY((uint32_t)sbuf[40], 36, "Length for encoding version 2"); - VERIFY((uint32_t)sbuf[35], 2, "Hyperslab encoding version is 2"); - - /* Decode from the dataspace buffer and return an object handle */ - decoded_sid = H5Sdecode(sbuf); - CHECK(decoded_sid, FAIL, "H5Sdecode"); - - /* Verify that the original and the decoded dataspace are equal */ - VERIFY(H5Sget_select_npoints(sid), H5Sget_select_npoints(decoded_sid), "Compare npoints"); - - /* Close the decoded dataspace */ - ret = H5Sclose(decoded_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free the buffer */ - if (sbuf) - free(sbuf); - - /* Close the original dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* test_h5s_encode_length() */ - -/**************************************************************** -** -** test_h5s_scalar_write(): Test scalar H5S (dataspace) writing code. -** -****************************************************************/ -static void -test_h5s_scalar_write(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - int rank; /* Logical rank of dataspace */ - hsize_t tdims[4]; /* Dimension array to test with */ - hssize_t n; /* Number of dataspace elements */ - H5S_class_t ext_type; /* Extent type */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Scalar Dataspace Manipulation during Writing\n")); - - /* Create file */ - fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Verify a non-zero rank fails with a NULL dimension. 
*/ - H5E_BEGIN_TRY - { - sid1 = H5Screate_simple(SPACE1_RANK, NULL, NULL); - } - H5E_END_TRY - VERIFY(sid1, FAIL, "H5Screate_simple"); - - /* Create scalar dataspace */ - sid1 = H5Screate_simple(SPACE3_RANK, NULL, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Retrieve the number of elements in the dataspace selection */ - n = H5Sget_simple_extent_npoints(sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, 1, "H5Sget_simple_extent_npoints"); - - /* Get the dataspace rank */ - rank = H5Sget_simple_extent_ndims(sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); - - /* Get the dataspace dimension sizes */ - rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); - VERIFY(rank, 0, "H5Sget_simple_extent_dims"); - - /* Verify extent type */ - ext_type = H5Sget_simple_extent_type(sid1); - VERIFY(ext_type, H5S_SCALAR, "H5Sget_simple_extent_type"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write to the dataset */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &space3_data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close scalar dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_h5s_scalar_write() */ - -/**************************************************************** -** -** test_h5s_scalar_read(): Test scalar H5S (dataspace) reading code. -** -****************************************************************/ -static void -test_h5s_scalar_read(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - int rank; /* Logical rank of dataspace */ - hsize_t tdims[4]; /* Dimension array to test with */ - hssize_t n; /* Number of dataspace elements */ - unsigned rdata; /* Scalar data read in */ - herr_t ret; /* Generic return value */ - H5S_class_t ext_type; /* Extent type */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Scalar Dataspace Manipulation during Reading\n")); - - /* Create file */ - fid1 = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Create a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - n = H5Sget_simple_extent_npoints(sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, 1, "H5Sget_simple_extent_npoints"); - - rank = H5Sget_simple_extent_ndims(sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); - - rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); - VERIFY(rank, 0, "H5Sget_simple_extent_dims"); - - /* Verify extent type */ - ext_type = H5Sget_simple_extent_type(sid1); - VERIFY(ext_type, H5S_SCALAR, "H5Sget_simple_extent_type"); - - ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); - CHECK(ret, FAIL, "H5Dread"); - VERIFY(rdata, space3_data, "H5Dread"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close scalar dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* 
test_h5s_scalar_read() */ - -/**************************************************************** -** -** test_h5s_compound_scalar_write(): Test scalar H5S (dataspace) writing for -** compound datatypes. -** -****************************************************************/ -static void -test_h5s_compound_scalar_write(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t tid1; /* Attribute datatype ID */ - hid_t sid1; /* Dataspace ID */ - int rank; /* Logical rank of dataspace */ - hsize_t tdims[4]; /* Dimension array to test with */ - hssize_t n; /* Number of dataspace elements */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Scalar Dataspace Manipulation for Writing Compound Datatypes\n")); - - /* Create file */ - fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create the compound datatype. */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct space4_struct)); - CHECK(tid1, FAIL, "H5Tcreate"); - space4_field1_off = HOFFSET(struct space4_struct, c1); - ret = H5Tinsert(tid1, SPACE4_FIELDNAME1, space4_field1_off, H5T_NATIVE_SCHAR); - CHECK(ret, FAIL, "H5Tinsert"); - space4_field2_off = HOFFSET(struct space4_struct, u); - ret = H5Tinsert(tid1, SPACE4_FIELDNAME2, space4_field2_off, H5T_NATIVE_UINT); - CHECK(ret, FAIL, "H5Tinsert"); - space4_field3_off = HOFFSET(struct space4_struct, f); - ret = H5Tinsert(tid1, SPACE4_FIELDNAME3, space4_field3_off, H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - space4_field4_off = HOFFSET(struct space4_struct, c2); - ret = H5Tinsert(tid1, SPACE4_FIELDNAME4, space4_field4_off, H5T_NATIVE_SCHAR); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create scalar dataspace */ - sid1 = H5Screate_simple(SPACE3_RANK, NULL, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - n = H5Sget_simple_extent_npoints(sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, 1, "H5Sget_simple_extent_npoints"); - - rank = H5Sget_simple_extent_ndims(sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); - - rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); - VERIFY(rank, 0, "H5Sget_simple_extent_dims"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &space4_data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close compound datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close scalar dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_h5s_compound_scalar_write() */ - -/**************************************************************** -** -** test_h5s_compound_scalar_read(): Test scalar H5S (dataspace) reading for -** compound datatypes. 
-** -****************************************************************/ -static void -test_h5s_compound_scalar_read(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t type; /* Datatype */ - int rank; /* Logical rank of dataspace */ - hsize_t tdims[4]; /* Dimension array to test with */ - hssize_t n; /* Number of dataspace elements */ - struct space4_struct rdata; /* Scalar data read in */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Scalar Dataspace Manipulation for Reading Compound Datatypes\n")); - - /* Create file */ - fid1 = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Create a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - n = H5Sget_simple_extent_npoints(sid1); - CHECK(n, FAIL, "H5Sget_simple_extent_npoints"); - VERIFY(n, 1, "H5Sget_simple_extent_npoints"); - - rank = H5Sget_simple_extent_ndims(sid1); - CHECK(rank, FAIL, "H5Sget_simple_extent_ndims"); - VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims"); - - rank = H5Sget_simple_extent_dims(sid1, tdims, NULL); - VERIFY(rank, 0, "H5Sget_simple_extent_dims"); - - type = H5Dget_type(dataset); - CHECK(type, FAIL, "H5Dget_type"); - - ret = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); - CHECK(ret, FAIL, "H5Dread"); - if (memcmp(&space4_data, &rdata, sizeof(struct space4_struct)) != 0) { - printf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n", space4_data.c1, rdata.c1); - printf("scalar data different: space4_data.u=%u, read_data4.u=%u\n", space4_data.u, rdata.u); - printf("scalar data different: space4_data.f=%f, read_data4.f=%f\n", (double)space4_data.f, - (double)rdata.f); - TestErrPrintf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n", space4_data.c1, - rdata.c2); - } /* end if */ - - /* Close datatype */ - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close scalar dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_h5s_compound_scalar_read() */ - -/* Data array sizes for chunk test */ -#define CHUNK_DATA_NX 50000 -#define CHUNK_DATA_NY 3 - -/**************************************************************** -** -** test_h5s_chunk(): Exercise chunked I/O, testing when data conversion -** is necessary and the entire chunk read in doesn't fit into the -** conversion buffer -** -****************************************************************/ -static void -test_h5s_chunk(void) -{ - herr_t status; - hid_t fileID, dsetID; - hid_t plist_id; - hid_t space_id; - hsize_t dims[2]; - hsize_t csize[2]; - double **chunk_data_dbl = NULL; - double *chunk_data_dbl_data = NULL; - float **chunk_data_flt = NULL; - float *chunk_data_flt_data = NULL; - int i, j; - - /* Allocate memory */ - chunk_data_dbl_data = (double *)calloc(CHUNK_DATA_NX * CHUNK_DATA_NY, sizeof(double)); - CHECK_PTR(chunk_data_dbl_data, "calloc"); - chunk_data_dbl = (double **)calloc(CHUNK_DATA_NX, sizeof(chunk_data_dbl_data)); - CHECK_PTR(chunk_data_dbl, "calloc"); - for (i = 0; i < CHUNK_DATA_NX; i++) - chunk_data_dbl[i] = chunk_data_dbl_data + (i * CHUNK_DATA_NY); - - chunk_data_flt_data = (float *)calloc(CHUNK_DATA_NX * 
CHUNK_DATA_NY, sizeof(float)); - CHECK_PTR(chunk_data_flt_data, "calloc"); - chunk_data_flt = (float **)calloc(CHUNK_DATA_NX, sizeof(chunk_data_flt_data)); - CHECK_PTR(chunk_data_flt, "calloc"); - for (i = 0; i < CHUNK_DATA_NX; i++) - chunk_data_flt[i] = chunk_data_flt_data + (i * CHUNK_DATA_NY); - - fileID = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fileID, FAIL, "H5Fcreate"); - - plist_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist_id, FAIL, "H5Pcreate"); - - csize[0] = CHUNK_DATA_NX; - csize[1] = CHUNK_DATA_NY; - status = H5Pset_chunk(plist_id, 2, csize); - CHECK(status, FAIL, "H5Pset_chunk"); - - /* Create the dataspace */ - dims[0] = CHUNK_DATA_NX; - dims[1] = CHUNK_DATA_NY; - space_id = H5Screate_simple(2, dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - dsetID = H5Dcreate2(fileID, "coords", H5T_NATIVE_FLOAT, space_id, H5P_DEFAULT, plist_id, H5P_DEFAULT); - CHECK(dsetID, FAIL, "H5Dcreate2"); - - /* Initialize float array */ - for (i = 0; i < CHUNK_DATA_NX; i++) - for (j = 0; j < CHUNK_DATA_NY; j++) - chunk_data_flt[i][j] = (float)(i + 1) * 2.5F - (float)j * 100.3F; - - status = H5Dwrite(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt_data); - CHECK(status, FAIL, "H5Dwrite"); - - status = H5Pclose(plist_id); - CHECK(status, FAIL, "H5Pclose"); - status = H5Sclose(space_id); - CHECK(status, FAIL, "H5Sclose"); - status = H5Dclose(dsetID); - CHECK(status, FAIL, "H5Dclose"); - status = H5Fclose(fileID); - CHECK(status, FAIL, "H5Fclose"); - - /* Reset/initialize the data arrays to read in */ - memset(chunk_data_dbl_data, 0, sizeof(double) * CHUNK_DATA_NX * CHUNK_DATA_NY); - memset(chunk_data_flt_data, 0, sizeof(float) * CHUNK_DATA_NX * CHUNK_DATA_NY); - - fileID = H5Fopen(DATAFILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fileID, FAIL, "H5Fopen"); - dsetID = H5Dopen2(fileID, "coords", H5P_DEFAULT); - CHECK(dsetID, FAIL, "H5Dopen2"); - - status = H5Dread(dsetID, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_dbl_data); - CHECK(status, FAIL, "H5Dread"); - status = H5Dread(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt_data); - CHECK(status, FAIL, "H5Dread"); - - status = H5Dclose(dsetID); - CHECK(status, FAIL, "H5Dclose"); - status = H5Fclose(fileID); - CHECK(status, FAIL, "H5Fclose"); - - for (i = 0; i < CHUNK_DATA_NX; i++) { - for (j = 0; j < CHUNK_DATA_NY; j++) { - /* Check if the two values are within 0.001% range. 
*/ - if (!H5_DBL_REL_EQUAL(chunk_data_dbl[i][j], (double)chunk_data_flt[i][j], 0.00001)) - TestErrPrintf("%u: chunk_data_dbl[%d][%d]=%e, chunk_data_flt[%d][%d]=%e\n", - (unsigned)__LINE__, i, j, chunk_data_dbl[i][j], i, j, - (double)chunk_data_flt[i][j]); - } /* end for */ - } /* end for */ - - free(chunk_data_dbl); - free(chunk_data_dbl_data); - free(chunk_data_flt); - free(chunk_data_flt_data); -} /* test_h5s_chunk() */ - -/**************************************************************** -** -** test_h5s_extent_equal(): Exercise extent comparison code -** -****************************************************************/ -static void -test_h5s_extent_equal(void) -{ - hid_t null_space; /* Null dataspace */ - hid_t scalar_space; /* Scalar dataspace */ - hid_t d1_space1, d1_space2, d1_space3, d1_space4; /* 1-D dataspaces */ - hid_t d2_space1, d2_space2, d2_space3, d2_space4; /* 2-D dataspaces */ - hid_t d3_space1, d3_space2, d3_space3, d3_space4; /* 3-D dataspaces */ - hsize_t d1_dims1[1] = {10}, /* 1-D dimensions */ - d1_dims2[1] = {20}, d1_dims3[1] = {H5S_UNLIMITED}; - hsize_t d2_dims1[2] = {10, 10}, /* 2-D dimensions */ - d2_dims2[2] = {20, 20}, d2_dims3[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; - hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */ - d3_dims2[3] = {20, 20, 20}, d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED}; - htri_t ext_equal; /* Whether two dataspace extents are equal */ - herr_t ret; /* Generic error return */ - - /* Create dataspaces */ - null_space = H5Screate(H5S_NULL); - CHECK(null_space, FAIL, "H5Screate"); - - scalar_space = H5Screate(H5S_SCALAR); - CHECK(scalar_space, FAIL, "H5Screate"); - - d1_space1 = H5Screate_simple(1, d1_dims1, NULL); - CHECK(d1_space1, FAIL, "H5Screate"); - d1_space2 = H5Screate_simple(1, d1_dims2, NULL); - CHECK(d1_space2, FAIL, "H5Screate"); - d1_space3 = H5Screate_simple(1, d1_dims1, d1_dims2); - CHECK(d1_space3, FAIL, "H5Screate"); - d1_space4 = H5Screate_simple(1, d1_dims1, d1_dims3); - CHECK(d1_space4, FAIL, "H5Screate"); - - d2_space1 = H5Screate_simple(2, d2_dims1, NULL); - CHECK(d2_space1, FAIL, "H5Screate"); - d2_space2 = H5Screate_simple(2, d2_dims2, NULL); - CHECK(d2_space2, FAIL, "H5Screate"); - d2_space3 = H5Screate_simple(2, d2_dims1, d2_dims2); - CHECK(d2_space3, FAIL, "H5Screate"); - d2_space4 = H5Screate_simple(2, d2_dims1, d2_dims3); - CHECK(d2_space4, FAIL, "H5Screate"); - - d3_space1 = H5Screate_simple(3, d3_dims1, NULL); - CHECK(d3_space1, FAIL, "H5Screate"); - d3_space2 = H5Screate_simple(3, d3_dims2, NULL); - CHECK(d3_space2, FAIL, "H5Screate"); - d3_space3 = H5Screate_simple(3, d3_dims1, d3_dims2); - CHECK(d3_space3, FAIL, "H5Screate"); - d3_space4 = H5Screate_simple(3, d3_dims1, d3_dims3); - CHECK(d3_space4, FAIL, "H5Screate"); - - /* Compare all dataspace combinations */ - - /* Compare null dataspace against all others, including itself */ - ext_equal = H5Sextent_equal(null_space, null_space); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d2_space1); - 
VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(null_space, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare scalar dataspace against all others, including itself */ - ext_equal = H5Sextent_equal(scalar_space, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, scalar_space); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(scalar_space, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 1-D dataspace w/no max. 
dims against all others, including itself */ - ext_equal = H5Sextent_equal(d1_space1, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d1_space1); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space1, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare larger 1-D dataspace w/no max. dims against all others, including itself */ - ext_equal = H5Sextent_equal(d1_space2, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d1_space2); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space2, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 1-D dataspace w/fixed max. 
dims against all others, including itself */ - ext_equal = H5Sextent_equal(d1_space3, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d1_space3); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space3, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 1-D dataspace w/unlimited max. dims against all others, including itself */ - ext_equal = H5Sextent_equal(d1_space4, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d1_space4); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d1_space4, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 2-D dataspace w/no max. 
dims against all others, including itself */ - ext_equal = H5Sextent_equal(d2_space1, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d2_space1); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space1, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare larger 2-D dataspace w/no max. dims against all others, including itself */ - ext_equal = H5Sextent_equal(d2_space2, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d2_space2); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space2, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 2-D dataspace w/fixed max. 
dims against all others, including itself */ - ext_equal = H5Sextent_equal(d2_space3, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d2_space3); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space3, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 2-D dataspace w/unlimited max. dims against all others, including itself */ - ext_equal = H5Sextent_equal(d2_space4, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d2_space4); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d2_space4, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 3-D dataspace w/no max. 
dims against all others, including itself */ - ext_equal = H5Sextent_equal(d3_space1, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d3_space1); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space1, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare larger 2-D dataspace w/no max. dims against all others, including itself */ - ext_equal = H5Sextent_equal(d3_space2, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d3_space2); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space2, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 2-D dataspace w/fixed max. 
dims against all others, including itself */ - ext_equal = H5Sextent_equal(d3_space3, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d3_space3); - VERIFY(ext_equal, true, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space3, d3_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - - /* Compare small 2-D dataspace w/unlimited max. dims against all others, including itself */ - ext_equal = H5Sextent_equal(d3_space4, null_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, scalar_space); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d1_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d1_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d1_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d1_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d2_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d2_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d2_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d2_space4); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d3_space1); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d3_space2); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d3_space3); - VERIFY(ext_equal, false, "H5Sextent_equal"); - ext_equal = H5Sextent_equal(d3_space4, d3_space4); - VERIFY(ext_equal, true, "H5Sextent_equal"); - - /* Close dataspaces */ - ret = H5Sclose(null_space); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(scalar_space); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(d1_space1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d1_space2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d1_space3); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d1_space4); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(d2_space1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d2_space2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d2_space3); - CHECK(ret, FAIL, "H5Sclose"); - ret = 
H5Sclose(d2_space4); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(d3_space1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d3_space2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d3_space3); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(d3_space4); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_h5s_extent_equal() */ - -/**************************************************************** -** -** test_h5s_extent_copy(): Exercise extent copy code -** -****************************************************************/ -static void -test_h5s_extent_copy(void) -{ - hid_t spaces[14] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; /* Array of all dataspaces */ - hid_t tmp_space = -1; - hsize_t d1_dims1[1] = {10}, /* 1-D dimensions */ - d1_dims2[1] = {20}, d1_dims3[1] = {H5S_UNLIMITED}; - hsize_t d2_dims1[2] = {10, 10}, /* 2-D dimensions */ - d2_dims2[2] = {20, 20}, d2_dims3[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; - hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */ - d3_dims2[3] = {20, 20, 20}, d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED}; - hsize_t npoints[14]; /* Expected number of points in selection for each element in spaces */ - hssize_t npoints_ret; /* Number of points returned by H5Sget_select_npoints() */ - htri_t ext_equal; /* Whether two dataspace extents are equal */ - const unsigned num_spaces = sizeof(spaces) / sizeof(spaces[0]); - unsigned i, j; - herr_t ret; /* Generic error return */ - - /* Create dataspaces */ - spaces[0] = H5Screate(H5S_NULL); - CHECK(spaces[0], FAIL, "H5Screate"); - npoints[0] = (hsize_t)0; - - spaces[1] = H5Screate(H5S_SCALAR); - CHECK(spaces[1], FAIL, "H5Screate"); - npoints[1] = (hsize_t)1; - - spaces[2] = H5Screate_simple(1, d1_dims1, NULL); - CHECK(spaces[2], FAIL, "H5Screate"); - npoints[2] = d1_dims1[0]; - spaces[3] = H5Screate_simple(1, d1_dims2, NULL); - CHECK(spaces[3], FAIL, "H5Screate"); - npoints[3] = d1_dims2[0]; - spaces[4] = H5Screate_simple(1, d1_dims1, d1_dims2); - CHECK(spaces[4], FAIL, "H5Screate"); - npoints[4] = d1_dims1[0]; - spaces[5] = H5Screate_simple(1, d1_dims1, d1_dims3); - CHECK(spaces[5], FAIL, "H5Screate"); - npoints[5] = d1_dims1[0]; - - spaces[6] = H5Screate_simple(2, d2_dims1, NULL); - CHECK(spaces[6], FAIL, "H5Screate"); - npoints[6] = d2_dims1[0] * d2_dims1[1]; - spaces[7] = H5Screate_simple(2, d2_dims2, NULL); - CHECK(spaces[7], FAIL, "H5Screate"); - npoints[7] = d2_dims2[0] * d2_dims2[1]; - spaces[8] = H5Screate_simple(2, d2_dims1, d2_dims2); - CHECK(spaces[8], FAIL, "H5Screate"); - npoints[8] = d2_dims1[0] * d2_dims1[1]; - spaces[9] = H5Screate_simple(2, d2_dims1, d2_dims3); - CHECK(spaces[9], FAIL, "H5Screate"); - npoints[9] = d2_dims1[0] * d2_dims1[1]; - - spaces[10] = H5Screate_simple(3, d3_dims1, NULL); - CHECK(spaces[10], FAIL, "H5Screate"); - npoints[10] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2]; - spaces[11] = H5Screate_simple(3, d3_dims2, NULL); - CHECK(spaces[11], FAIL, "H5Screate"); - npoints[11] = d3_dims2[0] * d3_dims2[1] * d3_dims2[2]; - spaces[12] = H5Screate_simple(3, d3_dims1, d3_dims2); - CHECK(spaces[12], FAIL, "H5Screate"); - npoints[12] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2]; - spaces[13] = H5Screate_simple(3, d3_dims1, d3_dims3); - CHECK(spaces[13], FAIL, "H5Screate"); - npoints[13] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2]; - - tmp_space = H5Screate(H5S_NULL); - CHECK(tmp_space, FAIL, "H5Screate"); - - /* Copy between all dataspace combinations. Note there are a few - * duplicates. 
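For reference, the H5Sextent_copy()/H5Sextent_equal() pattern that this copy-and-compare loop exercises looks roughly like the following minimal sketch against the public HDF5 API. It is not part of the original test file; the function name extent_copy_sketch and the simplified error handling are illustrative only.

#include "hdf5.h"

static int
extent_copy_sketch(void)
{
    hsize_t dims[2] = {10, 10};
    hid_t   src     = H5Screate_simple(2, dims, NULL); /* 10x10 simple dataspace */
    hid_t   dst     = H5Screate(H5S_NULL);             /* starts out with a null extent */

    if (src < 0 || dst < 0)
        return -1;

    /* Copy src's extent onto dst (destination dataspace comes first) */
    if (H5Sextent_copy(dst, src) < 0)
        return -1;

    /* The two extents now compare equal, and dst's default "all" selection
     * covers the full 10 x 10 = 100 elements.
     */
    if (H5Sextent_equal(dst, src) <= 0)
        return -1;
    if (H5Sget_select_npoints(dst) != 100)
        return -1;

    H5Sclose(src);
    H5Sclose(dst);
    return 0;
}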
*/ - for (i = 0; i < num_spaces; i++) - for (j = i; j < num_spaces; j++) { - /* Copy from i to j, unless the inner loop just restarted, in which - * case i and j are the same, so the second call to H5Sextent_copy() - * will test copying from i/j to i/j */ - ret = H5Sextent_copy(tmp_space, spaces[j]); - CHECK(ret, FAIL, "H5Sextent_copy"); - - /* Verify that the extents are equal */ - ext_equal = H5Sextent_equal(tmp_space, spaces[j]); - VERIFY(ext_equal, true, "H5Sextent_equal"); - - /* Verify that the correct number of elements is selected */ - npoints_ret = H5Sget_select_npoints(tmp_space); - VERIFY((hsize_t)npoints_ret, npoints[j], "H5Sget_select_npoints"); - - /* Copy from j to i */ - ret = H5Sextent_copy(tmp_space, spaces[i]); - CHECK(ret, FAIL, "H5Sextent_copy"); - - /* Verify that the extents are equal */ - ext_equal = H5Sextent_equal(tmp_space, spaces[i]); - VERIFY(ext_equal, true, "H5Sextent_equal"); - - /* Verify that the correct number of elements is selected */ - npoints_ret = H5Sget_select_npoints(tmp_space); - VERIFY((hsize_t)npoints_ret, npoints[i], "H5Sget_select_npoints"); - } /* end for */ - - /* Close dataspaces */ - for (i = 0; i < num_spaces; i++) { - ret = H5Sclose(spaces[i]); - CHECK(ret, FAIL, "H5Sclose"); - spaces[i] = -1; - } /* end for */ - - ret = H5Sclose(tmp_space); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_h5s_extent_copy() */ - -/**************************************************************** -** -** test_h5s_bug1(): Test Creating dataspace with H5Screate then -* setting extent with H5Sextent_copy. -** -****************************************************************/ -static void -test_h5s_bug1(void) -{ - hid_t space1; /* Dataspace to copy extent to */ - hid_t space2; /* Scalar dataspace */ - hsize_t dims[2] = {10, 10}; /* Dimensions */ - hsize_t start[2] = {0, 0}; /* Hyperslab start */ - htri_t select_valid; /* Whether the dataspace selection is valid */ - herr_t ret; /* Generic error return */ - - /* Create dataspaces */ - space1 = H5Screate(H5S_SIMPLE); - CHECK(space1, FAIL, "H5Screate"); - space2 = H5Screate_simple(2, dims, NULL); - CHECK(space2, FAIL, "H5Screate"); - - /* Copy extent to space1 */ - ret = H5Sextent_copy(space1, space2); - CHECK(ret, FAIL, "H5Sextent_copy"); - - /* Select hyperslab in space1 containing entire extent */ - ret = H5Sselect_hyperslab(space1, H5S_SELECT_SET, start, NULL, dims, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check that space1's selection is valid */ - select_valid = H5Sselect_valid(space1); - CHECK(select_valid, FAIL, "H5Sselect_valid"); - VERIFY(select_valid, true, "H5Sselect_valid result"); - - /* Close dataspaces */ - ret = H5Sclose(space1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(space2); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_h5s_bug1() */ - -/**************************************************************** -** -** test_h5s_bug2(): Test combining hyperslabs in a way that used -** to trip up H5S__hyper_update_diminfo() -** -****************************************************************/ -static void -test_h5s_bug2(void) -{ - hid_t space; /* Dataspace to copy extent to */ - hsize_t dims[2] = {1, 5}; /* Dimensions */ - hsize_t start[2] = {0, 0}; /* Hyperslab start */ - hsize_t count[2] = {1, 1}; /* Hyperslab start */ - htri_t select_valid; /* Whether the dataspace selection is valid */ - hssize_t elements_selected; /* Number of elements selected */ - herr_t ret; /* Generic error return */ - - /* Create dataspace */ - space = H5Screate_simple(2, dims, NULL); - CHECK(space, FAIL, 
"H5Screate"); - - /* Select hyperslab in space containing first element */ - ret = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Add hyperslab in space containing last element */ - start[1] = 4; - ret = H5Sselect_hyperslab(space, H5S_SELECT_OR, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Add hyperslab in space containing the first 3 elements */ - start[1] = 0; - count[1] = 3; - ret = H5Sselect_hyperslab(space, H5S_SELECT_OR, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check that space's selection is valid */ - select_valid = H5Sselect_valid(space); - CHECK(select_valid, FAIL, "H5Sselect_valid"); - VERIFY(select_valid, true, "H5Sselect_valid result"); - - /* Check that 4 elements are selected */ - elements_selected = H5Sget_select_npoints(space); - CHECK(elements_selected, FAIL, "H5Sselect_valid"); - VERIFY(elements_selected, 4, "H5Sselect_valid result"); - - /* Close dataspaces */ - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_h5s_bug2() */ - -/*------------------------------------------------------------------------- - * Function: test_versionbounds - * - * Purpose: Tests version bounds with dataspace. - * - * Description: - * This function creates a file with lower bounds then later - * reopens it with higher bounds to show that the dataspace - * version is upgraded appropriately. - * - * Return: Success: 0 - * Failure: number of errors - * - *------------------------------------------------------------------------- - */ -#define VERBFNAME "tverbounds_dspace.h5" -#define BASIC_DSET "Basic Dataset" -#define LATEST_DSET "Latest Dataset" -static void -test_versionbounds(void) -{ - hid_t file = -1; /* File ID */ - hid_t space = -1; /* Dataspace ID */ - hid_t dset = -1; /* Dataset ID */ - hid_t fapl = -1; /* File access property list ID */ - hid_t dset_space = -1; /* Retrieved dataset's dataspace ID */ - hsize_t dim[1]; /* Dataset dimensions */ - H5F_libver_t low, high; /* File format bounds */ -#if 0 - H5S_t *spacep = NULL; /* Pointer to internal dataspace */ -#endif - herr_t ret = 0; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Version Bounds\n")); - - /* Create a file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Create dataspace */ - dim[0] = 10; - space = H5Screate_simple(1, dim, NULL); - CHECK(space, FAIL, "H5Screate"); -#if 0 - /* Its version should be H5O_SDSPACE_VERSION_1 */ - spacep = (H5S_t *)H5I_object(space); - CHECK_PTR(spacep, "H5I_object"); - VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound"); -#endif - - /* Set high bound to V18 */ - low = H5F_LIBVER_EARLIEST; - high = H5F_LIBVER_V18; - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the file */ - file = H5Fcreate(VERBFNAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create a basic dataset */ - dset = H5Dcreate2(file, BASIC_DSET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - if (dset > 0) /* dataset created successfully */ - { - /* Get the internal dataspace pointer */ - dset_space = H5Dget_space(dset); - CHECK(dset_space, FAIL, "H5Dget_space"); -#if 0 - spacep = (H5S_t *)H5I_object(dset_space); - CHECK_PTR(spacep, "H5I_object"); - - /* Dataspace version should remain as H5O_SDSPACE_VERSION_1 */ - 
VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound"); -#endif - /* Close dataspace */ - ret = H5Sclose(dset_space); - CHECK(ret, FAIL, "H5Sclose"); - } - - /* Close basic dataset and the file */ - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Set low and high bounds to latest to trigger the increment of the - dataspace version */ - low = H5F_LIBVER_LATEST; - high = H5F_LIBVER_LATEST; - ret = H5Pset_libver_bounds(fapl, low, high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Reopen the file with new version bounds, LATEST/LATEST */ - file = H5Fopen(VERBFNAME, H5F_ACC_RDWR, fapl); - - /* Create another dataset using the same dspace as the previous dataset */ - dset = H5Dcreate2(file, LATEST_DSET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - /* Dataset created successfully. Verify that dataspace version has been - upgraded per the low bound */ - - /* Get the internal dataspace pointer */ - dset_space = H5Dget_space(dset); - CHECK(dset_space, FAIL, "H5Dget_space"); -#if 0 - spacep = (H5S_t *)H5I_object(dset_space); - CHECK_PTR(spacep, "H5I_object"); - - /* Verify the dataspace version */ - VERIFY(spacep->extent.version, H5O_sdspace_ver_bounds[low], "upgraded dataspace version"); -#endif - /* Close everything */ - ret = H5Sclose(dset_space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_versionbounds() */ - -/**************************************************************** -** -** test_h5s(): Main H5S (dataspace) testing routine. 
-** -****************************************************************/ -void -test_h5s(void) -{ - H5F_libver_t low, high; /* Low and high bounds */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataspaces\n")); - - test_h5s_basic(); /* Test basic H5S code */ - test_h5s_null(); /* Test Null dataspace H5S code */ - test_h5s_zero_dim(); /* Test dataspace with zero dimension size */ -#if 0 - /* Loop through all the combinations of low/high version bounds */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - - /* Invalid combinations, just continue */ - if (high == H5F_LIBVER_EARLIEST || high < low) - continue; -#else - low = H5F_LIBVER_LATEST; - high = H5F_LIBVER_LATEST; -#endif - test_h5s_encode(low, high); /* Test encoding and decoding */ - test_h5s_encode_regular_hyper(low, high); /* Test encoding regular hyperslabs */ - test_h5s_encode_irregular_hyper(low, high); /* Test encoding irregular hyperslabs */ - test_h5s_encode_points(low, high); /* Test encoding points */ -#if 0 - } /* end high bound */ - } /* end low bound */ -#endif - test_h5s_encode_length(); /* Test version 2 hyperslab encoding length is correct */ -#ifndef H5_NO_DEPRECATED_SYMBOLS - test_h5s_encode1(); /* Test operations with old API routine (H5Sencode1) */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - - test_h5s_scalar_write(); /* Test scalar H5S writing code */ - test_h5s_scalar_read(); /* Test scalar H5S reading code */ - - test_h5s_compound_scalar_write(); /* Test compound datatype scalar H5S writing code */ - test_h5s_compound_scalar_read(); /* Test compound datatype scalar H5S reading code */ - - /* This test was added later to exercise a bug in chunked I/O */ - test_h5s_chunk(); /* Exercise bug fix for chunked I/O */ - - test_h5s_extent_equal(); /* Test extent comparison code */ - test_h5s_extent_copy(); /* Test extent copy code */ - test_h5s_bug1(); /* Test bug in offset initialization */ - test_h5s_bug2(); /* Test bug found in H5S__hyper_update_diminfo() */ - test_versionbounds(); /* Test version bounds with dataspace */ -} /* test_h5s() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_h5s - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_h5s(void) -{ - H5Fdelete(DATAFILE, H5P_DEFAULT); - H5Fdelete(NULLFILE, H5P_DEFAULT); - H5Fdelete(BASICFILE, H5P_DEFAULT); - H5Fdelete(ZEROFILE, H5P_DEFAULT); - H5Fdelete(VERBFNAME, H5P_DEFAULT); -} diff --git a/test/API/tid.c b/test/API/tid.c deleted file mode 100644 index 649bacb89f2..00000000000 --- a/test/API/tid.c +++ /dev/null @@ -1,1413 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* Test user-created identifiers (hid_t's) and identifier types. 
*/ - -#include "testhdf5.h" - -#if 0 -/* Include H5Ipkg.h to calculate max number of groups */ -#define H5I_FRIEND /*suppress error about including H5Ipkg */ -#include "H5Ipkg.h" -#endif - -/* - * Number of bits to use for ID Type in each ID. Increase if more types - * are needed (though this will decrease the number of available IDs per - * type). This is the only number that must be changed since all other bit - * field sizes and masks are calculated from TYPE_BITS. - */ -#define TYPE_BITS 7 -#define TYPE_MASK (((hid_t)1 << TYPE_BITS) - 1) - -#define H5I_MAX_NUM_TYPES TYPE_MASK - -static herr_t -free_wrapper(void *p, void H5_ATTR_UNUSED **_ctx) -{ - free(p); - return SUCCEED; -} - -/* Test basic functionality of registering and deleting types and IDs */ -static int -basic_id_test(void) -{ - H5I_type_t myType = H5I_BADID; - hid_t arrayID = H5I_INVALID_HID; - void *testObj = NULL; - void *testPtr = NULL; - char nameString[10]; - hid_t testID; - ssize_t testSize = -1; - herr_t err; - int num_ref; - hsize_t num_members; - - /* Try to register an ID with fictitious types */ - H5E_BEGIN_TRY - arrayID = H5Iregister((H5I_type_t)420, testObj); - H5E_END_TRY - - VERIFY(arrayID, H5I_INVALID_HID, "H5Iregister"); - if (arrayID != H5I_INVALID_HID) - goto out; - - H5E_BEGIN_TRY - arrayID = H5Iregister((H5I_type_t)-1, testObj); - H5E_END_TRY - - VERIFY(arrayID, H5I_INVALID_HID, "H5Iregister"); - if (arrayID != H5I_INVALID_HID) - goto out; - - /* Try to access IDs with fictitious types */ - H5E_BEGIN_TRY - testPtr = H5Iobject_verify((hid_t)100, (H5I_type_t)0); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Iobject_verify"); - if (testPtr != NULL) - goto out; - - H5E_BEGIN_TRY - testPtr = H5Iobject_verify((hid_t)700, (H5I_type_t)700); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Iobject_verify"); - if (testPtr != NULL) - goto out; - - /* Register a type */ - myType = H5Iregister_type((size_t)64, 0, free_wrapper); - - CHECK(myType, H5I_BADID, "H5Iregister_type"); - if (myType == H5I_BADID) - goto out; - - /* Register an ID and retrieve the object it points to. - * Once the ID has been registered, testObj will be freed when - * its ID type is destroyed. 
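Stripped of the CHECK/VERIFY error checking, the basic user-defined ID life cycle that this test walks through is roughly the sketch below. The names sketch_free and id_type_sketch are illustrative only; the two-argument free callback mirrors the free_wrapper() signature used in this file.

#include <stdlib.h>
#include "hdf5.h"

/* Free callback for the user-defined type; the second argument is unused here */
static herr_t
sketch_free(void *obj, void **request)
{
    (void)request;
    free(obj);
    return 0;
}

static int
id_type_sketch(void)
{
    H5I_type_t type;
    int       *obj;
    hid_t      id;

    /* Register a new ID type with a free callback */
    if ((type = H5Iregister_type((size_t)64, 0, sketch_free)) == H5I_BADID)
        return -1;

    /* Register an object in that type; the type now owns the memory */
    if (NULL == (obj = malloc(sizeof(int))))
        return -1;
    *obj = 42;
    if ((id = H5Iregister(type, obj)) == H5I_INVALID_HID)
        return -1;

    /* Look the same object up again through its ID */
    if (H5Iobject_verify(id, type) != obj)
        return -1;

    /* Destroying the type calls sketch_free() on every object still registered */
    return (H5Idestroy_type(type) < 0) ? -1 : 0;
}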
- */ - testObj = malloc(7 * sizeof(int)); - arrayID = H5Iregister(myType, testObj); - - CHECK(arrayID, H5I_INVALID_HID, "H5Iregister"); - if (arrayID == H5I_INVALID_HID) { - free(testObj); - goto out; - } - - testPtr = (int *)H5Iobject_verify(arrayID, myType); - - CHECK_PTR_EQ(testPtr, testObj, "H5Iobject_verify"); - if (testPtr != testObj) - goto out; - - /* Ensure that H5Iget_file_id and H5Iget_name() fail, since this - * is an hid_t for the wrong kind of object - */ - H5E_BEGIN_TRY - testID = H5Iget_file_id(arrayID); - H5E_END_TRY - - VERIFY(testID, H5I_INVALID_HID, "H5Iget_file_id"); - if (testID != H5I_INVALID_HID) - goto out; - - H5E_BEGIN_TRY - testSize = H5Iget_name(arrayID, nameString, (size_t)9); - H5E_END_TRY - - VERIFY(testSize, -1, "H5Iget_name"); - if (testSize != -1) - goto out; - - /* Make sure H5Iremove_verify catches objects of the wrong type */ - H5E_BEGIN_TRY - testPtr = (int *)H5Iremove_verify(arrayID, (H5I_type_t)0); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Iremove_verify"); - if (testPtr != NULL) - goto out; - - H5E_BEGIN_TRY - testPtr = (int *)H5Iremove_verify(arrayID, (H5I_type_t)((int)myType - 1)); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Iremove_verify"); - if (testPtr != NULL) - goto out; - - /* Remove an ID and make sure we can't access it */ - testPtr = (int *)H5Iremove_verify(arrayID, myType); - - CHECK_PTR(testPtr, "H5Iremove_verify"); - if (testPtr == NULL) - goto out; - - H5E_BEGIN_TRY - testPtr = (int *)H5Iobject_verify(arrayID, myType); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Iobject_verify"); - if (testPtr != NULL) - goto out; - - /* Delete the type and make sure we can't access objects within it */ - arrayID = H5Iregister(myType, testObj); - - err = H5Idestroy_type(myType); - VERIFY(err, 0, "H5Idestroy_type"); - if (err != 0) - goto out; - VERIFY(H5Itype_exists(myType), 0, "H5Itype_exists"); - if (H5Itype_exists(myType) != 0) - goto out; - - H5E_BEGIN_TRY - VERIFY(H5Inmembers(myType, NULL), -1, "H5Inmembers"); - if (H5Inmembers(myType, NULL) != -1) - goto out; - H5E_END_TRY - - /* Register another type and another object in that type */ - myType = H5Iregister_type((size_t)64, 0, free_wrapper); - - CHECK(myType, H5I_BADID, "H5Iregister_type"); - if (myType == H5I_BADID) - goto out; - - /* The memory that testObj pointed to should already have been - * freed when the previous type was destroyed. Allocate new - * memory for it. 
- */ - testObj = malloc(7 * sizeof(int)); - arrayID = H5Iregister(myType, testObj); - - CHECK(arrayID, H5I_INVALID_HID, "H5Iregister"); - if (arrayID == H5I_INVALID_HID) { - free(testObj); - goto out; - } - - err = H5Inmembers(myType, &num_members); - CHECK(err, -1, "H5Inmembers"); - if (err < 0) - goto out; - VERIFY(num_members, 1, "H5Inmembers"); - if (num_members != 1) - goto out; - - /* Increment references to type and ensure that dec_type_ref - * doesn't destroy the type - */ - num_ref = H5Iinc_type_ref(myType); - VERIFY(num_ref, 2, "H5Iinc_type_ref"); - if (num_ref != 2) - goto out; - num_ref = H5Idec_type_ref(myType); - VERIFY(num_ref, 1, "H5Idec_type_ref"); - if (num_ref != 1) - goto out; - err = H5Inmembers(myType, &num_members); - CHECK(err, -1, "H5Inmembers"); - if (err < 0) - goto out; - VERIFY(num_members, 1, "H5Inmembers"); - if (num_members != 1) - goto out; - - /* This call to dec_type_ref should destroy the type */ - num_ref = H5Idec_type_ref(myType); - VERIFY(num_ref, 0, "H5Idec_type_ref"); - if (num_ref != 0) - goto out; - VERIFY(H5Itype_exists(myType), 0, "H5Itype_exists"); - if (H5Itype_exists(myType) != 0) - goto out; - - H5E_BEGIN_TRY - err = H5Inmembers(myType, &num_members); - if (err >= 0) - goto out; - H5E_END_TRY - - return 0; - -out: - /* Clean up type if it has been allocated and free memory used - * by testObj - */ - if (myType >= 0) - H5Idestroy_type(myType); - - return -1; -} - -/* A dummy search function for the next test */ -static int -test_search_func(void H5_ATTR_UNUSED *ptr1, hid_t H5_ATTR_UNUSED id, void H5_ATTR_UNUSED *ptr2) -{ - return 0; -} - -/* Ensure that public functions cannot access "predefined" ID types */ -static int -id_predefined_test(void) -{ - void *testObj; - hid_t testID; - hid_t typeID = H5I_INVALID_HID; - void *testPtr; - herr_t testErr; - - testObj = malloc(sizeof(int)); - - /* - * Attempt to perform public functions on various library types - */ - - H5E_BEGIN_TRY - testID = H5Iregister(H5I_FILE, testObj); - H5E_END_TRY - - VERIFY(testID, H5I_INVALID_HID, "H5Iregister"); - if (testID != H5I_INVALID_HID) - goto out; - - H5E_BEGIN_TRY - testPtr = H5Isearch(H5I_GENPROP_LST, test_search_func, testObj); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Isearch"); - if (testPtr != NULL) - goto out; - - H5E_BEGIN_TRY - testErr = H5Inmembers(H5I_ERROR_STACK, NULL); - H5E_END_TRY - - VERIFY(testErr, -1, "H5Inmembers"); - if (testErr != -1) - goto out; - - H5E_BEGIN_TRY - testErr = H5Iclear_type(H5I_FILE, 0); - H5E_END_TRY - - VERIFY((testErr >= 0), 0, "H5Iclear_type"); - if (testErr >= 0) - goto out; - - H5E_BEGIN_TRY - testErr = H5Idestroy_type(H5I_DATASET); - H5E_END_TRY - - VERIFY((testErr >= 0), 0, "H5Idestroy_type"); - if (testErr >= 0) - goto out; - - H5E_BEGIN_TRY - testErr = H5Itype_exists(H5I_GROUP); - H5E_END_TRY - - VERIFY(testErr, -1, "H5Itype_exists"); - if (testErr != -1) - goto out; - - H5E_BEGIN_TRY - testErr = H5Itype_exists(H5I_ATTR); - H5E_END_TRY - - VERIFY(testErr, -1, "H5Itype_exists"); - if (testErr != -1) - goto out; - - /* - * Create a datatype ID and try to perform illegal functions on it - */ - - typeID = H5Tcreate(H5T_OPAQUE, (size_t)42); - CHECK(typeID, H5I_INVALID_HID, "H5Tcreate"); - if (typeID == H5I_INVALID_HID) - goto out; - - H5E_BEGIN_TRY - testPtr = H5Iremove_verify(typeID, H5I_DATATYPE); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, "H5Iremove_verify"); - if (testPtr != NULL) - goto out; - - H5E_BEGIN_TRY - testPtr = H5Iobject_verify(typeID, H5I_DATATYPE); - H5E_END_TRY - - CHECK_PTR_NULL(testPtr, 
"H5Iobject_verify"); - if (testPtr != NULL) - goto out; - - H5Tclose(typeID); - - /* testObj was never registered as an atom, so it will not be - * automatically freed. */ - free(testObj); - return 0; - -out: - if (typeID != H5I_INVALID_HID) - H5Tclose(typeID); - if (testObj != NULL) - free(testObj); - - return -1; -} - -/* Test the H5Iis_valid function */ -static int -test_is_valid(void) -{ - hid_t dtype; /* datatype id */ -#if 0 - int64_t nmembs1; /* number of type memnbers */ - int64_t nmembs2; -#endif - htri_t tri_ret; /* htri_t return value */ -#if 0 - herr_t ret; /* return value */ -#endif - - /* Create a datatype id */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - if (dtype < 0) - goto out; - - /* Check that the ID is valid */ - tri_ret = H5Iis_valid(dtype); - VERIFY(tri_ret, true, "H5Iis_valid"); - if (tri_ret != true) - goto out; -#if 0 /* Cannot call internal APIs and cannot call public H5Inmembers on library types */ - /* Artificially manipulate the reference counts so app_count is 0, and dtype - * appears to be an internal id. This takes advantage of the fact that - * H5Ipkg is included. - */ - ret = H5I_inc_ref(dtype, false); - CHECK(ret, FAIL, "H5I_inc_ref"); - if (ret < 0) - goto out; - ret = H5I_dec_app_ref(dtype); - CHECK(ret, FAIL, "H5I_dec_ref"); - if (ret < 0) - goto out; - - /* Check that dtype is invalid */ - tri_ret = H5Iis_valid(dtype); - VERIFY(tri_ret, false, "H5Iis_valid"); - if (tri_ret != false) - goto out; - - /* Close dtype and verify that it has been closed */ - nmembs1 = H5I_nmembers(H5I_DATATYPE); - CHECK(nmembs1, FAIL, "H5I_nmembers"); - if (nmembs1 < 0) - goto out; - ret = H5I_dec_ref(dtype); - CHECK(ret, FAIL, "H5I_dec_ref"); - if (ret < 0) - goto out; - nmembs2 = H5I_nmembers(H5I_DATATYPE); - VERIFY(nmembs2, nmembs1 - 1, "H5I_nmembers"); - if (nmembs2 != nmembs1 - 1) - goto out; - - /* Check that dtype is invalid */ - tri_ret = H5Iis_valid(dtype); - VERIFY(tri_ret, false, "H5Iis_valid"); - if (tri_ret != false) - goto out; -#endif - /* Check that an id of -1 is invalid */ - tri_ret = H5Iis_valid((hid_t)-1); - VERIFY(tri_ret, false, "H4Iis_valid"); - if (tri_ret != false) - goto out; - - return 0; - -out: - /* Don't attempt to close dtype as we don't know the exact state of the - * reference counts. Every state in this function will be automatically - * closed at library exit anyways, as internal count is never > 1. - */ - return -1; -} - -/* Test the H5Iget_type function */ -static int -test_get_type(void) -{ - hid_t dtype; /* datatype id */ - H5I_type_t type_ret; /* return value */ - - /* Create a datatype id */ - dtype = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype, FAIL, "H5Tcopy"); - if (dtype < 0) - goto out; - - /* Check that the ID is correct */ - type_ret = H5Iget_type(dtype); - VERIFY(type_ret, H5I_DATATYPE, "H5Iget_type"); - if (type_ret == H5I_BADID) - goto out; - - /* Check that the ID is correct */ - type_ret = H5Iget_type((hid_t)H5T_STRING); - VERIFY(type_ret, H5I_BADID, "H5Iget_type"); - if (type_ret != H5I_BADID) - goto out; - - /* Check that the ID is correct */ - type_ret = H5Iget_type((hid_t)-1); - VERIFY(type_ret, H5I_BADID, "H5Iget_type"); - if (type_ret != H5I_BADID) - goto out; - - H5Tclose(dtype); - - return 0; - -out: - if (dtype != H5I_INVALID_HID) - H5Tclose(dtype); - - return -1; -} - -/* Test boundary cases with lots of types */ - -/* Type IDs range from H5I_NTYPES to H5I_MAX_NUM_TYPES. 
The system will assign */ -/* IDs in sequential order until H5I_MAX_NUM_TYPES IDs have been given out, at which */ -/* point it will search for type IDs that were allocated but have since been */ -/* deleted. */ -/* This test will allocate IDs up to H5I_MAX_NUM_TYPES, ensure that IDs wrap around */ -/* to low values successfully, ensure that an error is thrown when all possible */ -/* type IDs are taken, then ensure that deleting types frees up their IDs. */ -/* Note that this test depends on the implementation of IDs, so may break */ -/* if the implementation changes. */ -/* Also note that if someone else registered a user-defined type and forgot to */ -/* destroy it, this test will mysteriously fail (because it will expect there to */ -/* be one more "free" type ID than there is). */ -/* H5I_NTYPES is defined in h5public.h, H5I_MAX_NUM_TYPES is defined in h5pkg.h */ -static int -test_id_type_list(void) -{ - H5I_type_t startType; /* The first type ID we were assigned in this test */ - H5I_type_t currentType; - H5I_type_t testType; - int i; /* Just a counter variable */ - - startType = H5Iregister_type((size_t)8, 0, free_wrapper); - CHECK(startType, H5I_BADID, "H5Iregister_type"); - if (startType == H5I_BADID) - goto out; - - /* Sanity check */ - if ((int)startType >= H5I_MAX_NUM_TYPES || startType < H5I_NTYPES) { - /* Error condition, throw an error */ - ERROR("H5Iregister_type"); - goto out; - } - /* Create types up to H5I_MAX_NUM_TYPES */ - for (i = startType + 1; i < H5I_MAX_NUM_TYPES; i++) { - currentType = H5Iregister_type((size_t)8, 0, free_wrapper); - CHECK(currentType, H5I_BADID, "H5Iregister_type"); - if (currentType == H5I_BADID) - goto out; - } - - /* Wrap around to low type ID numbers */ - for (i = H5I_NTYPES; i < startType; i++) { - currentType = H5Iregister_type((size_t)8, 0, free_wrapper); - CHECK(currentType, H5I_BADID, "H5Iregister_type"); - if (currentType == H5I_BADID) - goto out; - } - - /* There should be no room at the inn for a new ID type*/ - H5E_BEGIN_TRY - testType = H5Iregister_type((size_t)8, 0, free_wrapper); - H5E_END_TRY - - VERIFY(testType, H5I_BADID, "H5Iregister_type"); - if (testType != H5I_BADID) - goto out; - - /* Now delete a type and try to insert again */ - H5Idestroy_type(H5I_NTYPES); - testType = H5Iregister_type((size_t)8, 0, free_wrapper); - - VERIFY(testType, H5I_NTYPES, "H5Iregister_type"); - if (testType != H5I_NTYPES) - goto out; - - /* Cleanup. Destroy all types. */ - for (i = H5I_NTYPES; i < H5I_MAX_NUM_TYPES; i++) - H5Idestroy_type((H5I_type_t)i); - - return 0; - -out: - /* Cleanup. For simplicity, just destroy all types and ignore errors. */ - H5E_BEGIN_TRY - for (i = H5I_NTYPES; i < H5I_MAX_NUM_TYPES; i++) - H5Idestroy_type((H5I_type_t)i); - H5E_END_TRY - return -1; -} - -/* Test removing ids in callback for H5Iclear_type */ - -/* There was a rare bug where, if an id free callback being called by - * H5I_clear_type() removed another id in that type, a segfault could occur. - * This test tests for that error (and freeing ids "out of order" within - * H5Iclear_type() in general). 
- * - * NB: RCT = "remove clear type" - */ - -/* Macro definitions */ -#define RCT_MAX_NOBJS 25 /* Maximum number of objects in the list */ -#define RCT_MIN_NOBJS 5 -#define RCT_NITER 50 /* Number of times we cycle through object creation and deletion */ - -/* Structure to hold the master list of objects */ -typedef struct rct_obj_list_t { - - /* Pointer to the objects */ - struct rct_obj_t *objects; - - /* The number of objects in the list */ - long count; - - /* The number of objects in the list that have not been freed */ - long remaining; -} rct_obj_list_t; - -/* Structure for an object */ -typedef struct rct_obj_t { - /* The ID for this object */ - hid_t id; - - /* The number of times this object has been freed */ - int nfrees; - - /* Whether we are currently freeing this object directly - * through H5Idec_ref(). - */ - bool freeing; - - /* Pointer to the master list of all objects */ - rct_obj_list_t *list; -} rct_obj_t; - -/* Free callback passed to H5Iclear_type() - * - * When invoked on a closing object, frees a random unfreed ID in the - * master list of objects. - */ -static herr_t -rct_free_cb(void *_obj, void H5_ATTR_UNUSED **_ctx) -{ - rct_obj_t *obj = (rct_obj_t *)_obj; - long remove_nth; - long i; - herr_t ret; - - /* Mark this object as freed */ - obj->nfrees++; - - /* Decrement the number of objects in the list that have not been freed */ - obj->list->remaining--; - - /* If this object isn't already being freed by a callback free call and - * the master object list still contains objects to free, pick another - * object and free it. - */ - if (!obj->freeing && (obj->list->remaining > 0)) { - - /* Pick a random object from the list. This is done by picking a - * random number between 0 and the # of remaining unfreed objects - * and then scanning through the list to find that nth unfreed - * object. - */ - remove_nth = HDrandom() % obj->list->remaining; - for (i = 0; i < obj->list->count; i++) - if (obj->list->objects[i].nfrees == 0) { - if (remove_nth == 0) - break; - else - remove_nth--; - } - - /* Badness if we scanned through the list and didn't manage to - * select one to delete (the list stats were probably updated - * incorrectly). - */ - if (i == obj->list->count) { - ERROR("invalid obj_list"); - goto error; - } - - /* Mark the object we're about to free so its own callback does - * not free another object. We don't want to recursively free the - * entire list when we free the first ID. 
- */ - obj->list->objects[i].freeing = true; - - /* Decrement the reference count on the object */ - ret = H5Idec_ref(obj->list->objects[i].id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (ret == FAIL) - goto error; - - /* Unset the "freeing" flag */ - obj->list->objects[i].freeing = false; - } - - /* Verify the number of objects remaining in the master list is non-negative */ - if (obj->list->remaining < 0) { - ERROR("invalid number of objects remaining"); - goto error; - } - - return 0; - -error: - return -1; -} /* end rct_free_cb() */ - -/* Test function */ -static int -test_remove_clear_type(void) -{ - H5I_type_t obj_type; - rct_obj_list_t obj_list; - rct_obj_t *objects = NULL; /* Convenience pointer to objects stored in master list */ - size_t list_size; - long i, j; - herr_t ret; /* return value */ - - /* Register a user-defined type with our custom ID-deleting callback */ - obj_type = H5Iregister_type((size_t)8, 0, rct_free_cb); - CHECK(obj_type, H5I_BADID, "H5Iregister_type"); - if (obj_type == H5I_BADID) - goto error; - - /* Create an array to hold the objects in the master list */ - list_size = RCT_MAX_NOBJS * sizeof(rct_obj_t); - obj_list.objects = malloc(list_size); - CHECK_PTR(obj_list.objects, "calloc"); - if (NULL == obj_list.objects) - goto error; - - /* Set a convenience pointer to the object array */ - objects = obj_list.objects; - - for (i = 0; i < RCT_NITER; i++) { - - /* The number of members in the type, according to the HDF5 library */ - hsize_t nmembers = 1234567; /* (init to fake number) */ - - /* The number of objects found while scanning through the object list */ - int found; - - /********************* - * Build object list * - *********************/ - - memset(obj_list.objects, 0, list_size); - - /* The number of objects used is a random number between the min and max */ - obj_list.count = obj_list.remaining = - RCT_MIN_NOBJS + (HDrandom() % (long)(RCT_MAX_NOBJS - RCT_MIN_NOBJS + 1)); - - /* Create the actual objects */ - for (j = 0; j < obj_list.count; j++) { - - /* Object setup */ - objects[j].nfrees = 0; - objects[j].freeing = false; - objects[j].list = &obj_list; - - /* Register an ID for it */ - objects[j].id = H5Iregister(obj_type, &objects[j]); - CHECK(objects[j].id, FAIL, "H5Iregister"); - if (objects[j].id == FAIL) - goto error; - - /* Bump the reference count by 1 (to 2) 50% of the time */ - if (HDrandom() % 2) { - ret = H5Iinc_ref(objects[j].id); - CHECK(ret, FAIL, "H5Iinc_ref"); - if (ret == FAIL) - goto error; - } - } - - /****************************************** - * Clear the type with force set to false * - ******************************************/ - - /* Clear the type. Since force is false, only - * IDs with a reference count of 1 will be cleared. 
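In isolation, the force = false / force = true behavior that the remainder of this loop verifies can be summarized by the short sketch below. It assumes a user-defined type registered with a malloc-freeing callback as above; clear_type_sketch and the unchecked calls are illustrative only.

#include <stdlib.h>
#include "hdf5.h"

static int
clear_type_sketch(H5I_type_t type)
{
    hsize_t n = 0;
    hid_t   kept;

    /* Two registered objects: the first keeps a reference count of 1,
     * the second is bumped to 2 so it survives a non-forced clear.
     */
    H5Iregister(type, malloc(sizeof(int)));
    kept = H5Iregister(type, malloc(sizeof(int)));
    H5Iinc_ref(kept);

    H5Iclear_type(type, 0); /* force = false: frees only the ref-count-1 object */
    H5Inmembers(type, &n);  /* n is now 1 */

    H5Iclear_type(type, 1); /* force = true: frees the remaining object too */
    H5Inmembers(type, &n);  /* n is now 0 */

    return (int)n;
}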
- */ - ret = H5Iclear_type(obj_type, false); - CHECK(ret, FAIL, "H5Iclear_type"); - if (ret == FAIL) - goto error; - - /* Verify that the object struct fields are sane and count the - * number of unfreed objects - */ - found = 0; - for (j = 0; j < obj_list.count; j++) { - - if (objects[j].nfrees == 0) { - /* Count unfreed objects */ - found++; - } - else { - /* Every freed object should have been freed exactly once */ - VERIFY(objects[j].nfrees, 1, "object freed more than once"); - if (objects[j].nfrees != 1) - goto error; - } - - /* No object should still be marked as "freeing" */ - VERIFY(objects[j].freeing, false, "object marked as freeing"); - if (objects[j].freeing != false) - goto error; - } - - /* Verify the number of unfreed objects we found during our scan - * matches the number stored in the list - */ - VERIFY(obj_list.remaining, found, "incorrect number of objects remaining"); - if (obj_list.remaining != found) - goto error; - - /* Make sure the HDF5 library confirms our count */ - ret = H5Inmembers(obj_type, &nmembers); - CHECK(ret, FAIL, "H5Inmembers"); - if (ret == FAIL) - goto error; - VERIFY(nmembers, found, "The number of members remaining in the type did not match our count"); - if (nmembers != (hsize_t)found) - goto error; - - /***************************************** - * Clear the type with force set to true * - *****************************************/ - - /* Clear the type. Since force is true, all IDs will be cleared. */ - ret = H5Iclear_type(obj_type, true); - CHECK(ret, FAIL, "H5Iclear_type"); - if (ret == FAIL) - goto error; - - /* Verify that the object struct fields are sane */ - for (j = 0; j < obj_list.count; j++) { - - /* Every object should have been freed exactly once */ - VERIFY(objects[j].nfrees, 1, "object freed more than once"); - if (objects[j].nfrees != 1) - goto error; - - /* No object should still be marked as "freeing" */ - VERIFY(objects[j].freeing, false, "object marked as freeing"); - if (objects[j].freeing != false) - goto error; - } - - /* Verify the number of objects is 0 */ - VERIFY(obj_list.remaining, 0, "objects remaining was not zero"); - if (obj_list.remaining != 0) - goto error; - - /* Make sure the HDF5 library confirms zero members in the type */ - ret = H5Inmembers(obj_type, &nmembers); - CHECK(ret, FAIL, "H5Inmembers"); - if (ret == FAIL) - goto error; - VERIFY(nmembers, 0, "The number of members remaining in the type was not zero"); - if (nmembers != 0) - goto error; - } - - /* Destroy the type */ - ret = H5Idestroy_type(obj_type); - CHECK(ret, FAIL, "H5Idestroy_type"); - if (ret == FAIL) - goto error; - - /* Free the object array */ - free(obj_list.objects); - - return 0; - -error: - /* Cleanup. For simplicity, just destroy the types and ignore errors. 
*/ - H5E_BEGIN_TRY - { - H5Idestroy_type(obj_type); - } - H5E_END_TRY - - free(obj_list.objects); - - return -1; -} /* end test_remove_clear_type() */ - -#if defined(H5VL_VERSION) && H5VL_VERSION >= 2 -/* Typedef for future objects */ -typedef struct { - H5I_type_t obj_type; /* ID type for actual object */ -} future_obj_t; - -/* Global (static) future ID object type */ -H5I_type_t future_obj_type_g = H5I_BADID; - -/* Callback to free the actual object for future object test */ -static herr_t -free_actual_object(void *_p, void H5_ATTR_UNUSED **_ctx) -{ - int *p = (int *)_p; - - if (7 != *p) - return FAIL; - - free(p); - - return SUCCEED; -} - -/* Callback to realize a future object */ -static herr_t -realize_future_cb(void *_future_obj, hid_t *actual_id) -{ - future_obj_t *future_obj = (future_obj_t *)_future_obj; /* Future object */ - int *actual_obj; /* Pointer to the actual object */ - - /* Check for bad future object */ - if (NULL == future_obj) - return FAIL; - - /* Determine type of object to realize */ - if (H5I_DATASPACE == future_obj->obj_type) { - hsize_t dims = 13; - - if ((*actual_id = H5Screate_simple(1, &dims, NULL)) < 0) - return FAIL; - } - else if (H5I_DATATYPE == future_obj->obj_type) { - if ((*actual_id = H5Tcopy(H5T_NATIVE_INT)) < 0) - return FAIL; - } - else if (H5I_GENPROP_LST == future_obj->obj_type) { - if ((*actual_id = H5Pcreate(H5P_DATASET_XFER)) < 0) - return FAIL; - } - else { - /* Create a new object (the 'actual object') of the correct type */ - if (NULL == (actual_obj = malloc(sizeof(int)))) - return FAIL; - *actual_obj = 7; - - /* Register actual object of the user-defined type */ - *actual_id = H5Iregister(future_obj->obj_type, actual_obj); - CHECK(*actual_id, FAIL, "H5Iregister"); - if (*actual_id == FAIL) - return FAIL; - } - - return SUCCEED; -} - -/* Callback to discard a future object */ -static herr_t -discard_future_cb(void *future_obj) -{ - if (NULL == future_obj) - return FAIL; - - free(future_obj); - - return SUCCEED; -} - -/* Callback to realize a future object when future objects are NULL*/ -static herr_t -realize_future_generate_cb(void *_future_obj, hid_t *actual_id) -{ - future_obj_t *future_obj = (future_obj_t *)_future_obj; /* Future object */ - int *actual_obj; /* Pointer to the actual object */ - - if (NULL != future_obj) - return FAIL; - /* Create a new object (the 'actual object') of the correct type */ - if (NULL == (actual_obj = malloc(sizeof(int)))) - return FAIL; - *actual_obj = 7; - - /* Register actual object without using future object info */ - *actual_id = H5Iregister(future_obj_type_g, actual_obj); - CHECK(*actual_id, FAIL, "H5Iregister"); - if (*actual_id == FAIL) - return FAIL; - - return SUCCEED; -} - -/* Callback to discard a future object when future objects are NULL */ -static herr_t -discard_future_generate_cb(void *future_obj) -{ - if (NULL != future_obj) - return FAIL; - - return SUCCEED; -} - -/* Test function */ -static int -test_future_ids(void) -{ - H5I_type_t obj_type; /* New user-defined ID type */ - hid_t future_id; /* ID for future object */ - int fake_future_obj; /* "Fake" future object for tests */ - future_obj_t *future_obj; /* Future object */ - int *actual_obj; /* Actual object */ - int *actual_obj2; /* Another actual object */ - H5I_type_t id_type; /* Type of ID */ - H5T_class_t type_class; /* Datatype class */ - herr_t ret; /* Return value */ - - /* Register a user-defined type with our custom ID-deleting callback */ - obj_type = H5Iregister_type((size_t)15, 0, free_actual_object); - CHECK(obj_type, 
H5I_BADID, "H5Iregister_type"); - if (H5I_BADID == obj_type) - goto error; - - /* Test basic error conditions */ - fake_future_obj = 0; - H5E_BEGIN_TRY - { - future_id = H5Iregister_future(obj_type, &fake_future_obj, NULL, NULL); - } - H5E_END_TRY - VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID != future_id) - goto error; - - H5E_BEGIN_TRY - { - future_id = H5Iregister_future(obj_type, &fake_future_obj, realize_future_cb, NULL); - } - H5E_END_TRY - VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID != future_id) - goto error; - - H5E_BEGIN_TRY - { - future_id = H5Iregister_future(obj_type, &fake_future_obj, NULL, discard_future_cb); - } - H5E_END_TRY - VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID != future_id) - goto error; - - H5E_BEGIN_TRY - { - future_id = H5Iregister_future(H5I_BADID, &fake_future_obj, realize_future_cb, discard_future_cb); - } - H5E_END_TRY - VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID != future_id) - goto error; - - /* Test base use-case: create a future object and destroy type without - * realizing the future object. - */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = obj_type; - future_id = H5Iregister_future(obj_type, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Destroy the type */ - ret = H5Idestroy_type(obj_type); - CHECK(ret, FAIL, "H5Idestroy_type"); - if (FAIL == ret) - goto error; - - /* Re-register a user-defined type with our custom ID-deleting callback */ - obj_type = H5Iregister_type((size_t)15, 0, free_actual_object); - CHECK(obj_type, H5I_BADID, "H5Iregister_type"); - if (H5I_BADID == obj_type) - goto error; - - /* Test base use-case: create a future object and realize the actual object. */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = obj_type; - future_id = H5Iregister_future(obj_type, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - actual_obj = H5Iobject_verify(future_id, obj_type); - CHECK_PTR(actual_obj, "H5Iobject_verify"); - if (NULL == actual_obj) - goto error; - VERIFY(*actual_obj, 7, "H5Iobject_verify"); - if (7 != *actual_obj) - goto error; - - /* Retrieve the object again and verify that it's the same actual object */ - actual_obj2 = H5Iobject_verify(future_id, obj_type); - CHECK_PTR(actual_obj2, "H5Iobject_verify"); - if (NULL == actual_obj2) - goto error; - VERIFY(*actual_obj2, 7, "H5Iobject_verify"); - if (7 != *actual_obj2) - goto error; - CHECK_PTR_EQ(actual_obj, actual_obj2, "H5Iobject_verify"); - if (actual_obj != actual_obj2) - goto error; - - /* Destroy the type */ - ret = H5Idestroy_type(obj_type); - CHECK(ret, FAIL, "H5Idestroy_type"); - if (FAIL == ret) - goto error; - - /* Re-register a user-defined type with our custom ID-deleting callback */ - obj_type = H5Iregister_type((size_t)15, 0, free_actual_object); - CHECK(obj_type, H5I_BADID, "H5Iregister_type"); - if (H5I_BADID == obj_type) - goto error; - - /* Set the global future object type */ - future_obj_type_g = obj_type; - - /* Test "actual object generator" use-case: create a future object with - * NULL object pointer, to create new object of predefined type when - * future object is realized. 
- */ - future_id = H5Iregister_future(obj_type, NULL, realize_future_generate_cb, discard_future_generate_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Realize the actual object, with will be dynamically allocated within - * the 'realize' callback. - */ - actual_obj = H5Iobject_verify(future_id, obj_type); - CHECK_PTR(actual_obj, "H5Iobject_verify"); - if (NULL == actual_obj) - goto error; - VERIFY(*actual_obj, 7, "H5Iobject_verify"); - if (7 != *actual_obj) - goto error; - - /* Reset the global future object type */ - future_obj_type_g = H5I_BADID; - - /* Retrieve the object again and verify that it's the same actual object */ - /* (Will fail if global future object type used) */ - actual_obj2 = H5Iobject_verify(future_id, obj_type); - CHECK_PTR(actual_obj2, "H5Iobject_verify"); - if (NULL == actual_obj2) - goto error; - VERIFY(*actual_obj2, 7, "H5Iobject_verify"); - if (7 != *actual_obj2) - goto error; - CHECK_PTR_EQ(actual_obj, actual_obj2, "H5Iobject_verify"); - if (actual_obj != actual_obj2) - goto error; - - /* Destroy the type */ - ret = H5Idestroy_type(obj_type); - CHECK(ret, FAIL, "H5Idestroy_type"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - /* (DATASPACE) */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_DATASPACE; - future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* (Can't verify the type of the future ID, because the library's current - * implementation realizes the object during sanity checks on the ID) - */ - - /* Close future object for pre-defined type without realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_DATASPACE; - future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Verify that the application believes the future ID is a dataspace */ - /* (Currently realizes the object "implicitly" during a sanity check) */ - id_type = H5Iget_type(future_id); - CHECK(id_type, H5I_BADID, "H5Iget_type"); - if (H5I_BADID == id_type) - goto error; - if (H5I_DATASPACE != id_type) - goto error; - - /* Close future object for pre-defined type without realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_DATASPACE; - future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Realize future dataspace by requesting its rank */ - ret = H5Sget_simple_extent_ndims(future_id); - CHECK(ret, FAIL, "H5Sget_simple_extent_ndims"); - if (FAIL == ret) - goto error; - if (1 != ret) - goto error; - - /* Verify that the application believes the ID is still a dataspace */ - id_type = H5Iget_type(future_id); - CHECK(id_type, H5I_BADID, "H5Iget_type"); - if 
(H5I_BADID == id_type) - goto error; - if (H5I_DATASPACE != id_type) - goto error; - - /* Close future object for pre-defined type after realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - /* (DATATYPE) */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_DATATYPE; - future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* (Can't verify the type of the future ID, because the library's current - * implementation realizes the object during sanity checks on the ID) - */ - - /* Close future object for pre-defined type without realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_DATATYPE; - future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Verify that the application believes the future ID is a datatype */ - /* (Currently realizes the object "implicitly" during a sanity check) */ - id_type = H5Iget_type(future_id); - CHECK(id_type, H5I_BADID, "H5Iget_type"); - if (H5I_BADID == id_type) - goto error; - if (H5I_DATATYPE != id_type) - goto error; - - /* Close future object for pre-defined type without realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_DATATYPE; - future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Realize future datatype by requesting its class */ - type_class = H5Tget_class(future_id); - CHECK(ret, FAIL, "H5Tget_class"); - if (FAIL == ret) - goto error; - if (H5T_INTEGER != type_class) - goto error; - - /* Verify that the application believes the ID is still a datatype */ - id_type = H5Iget_type(future_id); - CHECK(id_type, H5I_BADID, "H5Iget_type"); - if (H5I_BADID == id_type) - goto error; - if (H5I_DATATYPE != id_type) - goto error; - - /* Close future object for pre-defined type after realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - /* (PROPERTY LIST) */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_GENPROP_LST; - future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* (Can't verify the type of the future ID, because the library's current - * implementation realizes the object during sanity checks on the ID) - */ - - /* Close future object for pre-defined type without realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: 
create a future object for a pre-defined type */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_GENPROP_LST; - future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Verify that the application believes the future ID is a property list */ - /* (Currently realizes the object "implicitly" during a sanity check) */ - id_type = H5Iget_type(future_id); - CHECK(id_type, H5I_BADID, "H5Iget_type"); - if (H5I_BADID == id_type) - goto error; - if (H5I_GENPROP_LST != id_type) - goto error; - - /* Close future object for pre-defined type without realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - /* Test base use-case: create a future object for a pre-defined type */ - future_obj = malloc(sizeof(future_obj_t)); - future_obj->obj_type = H5I_GENPROP_LST; - future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb); - CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future"); - if (H5I_INVALID_HID == future_id) - goto error; - - /* Realize future property list by verifying its class */ - ret = H5Pisa_class(future_id, H5P_DATASET_XFER); - CHECK(ret, FAIL, "H5Pisa_class"); - if (FAIL == ret) - goto error; - if (true != ret) - goto error; - - /* Verify that the application believes the ID is still a property list */ - id_type = H5Iget_type(future_id); - CHECK(id_type, H5I_BADID, "H5Iget_type"); - if (H5I_BADID == id_type) - goto error; - if (H5I_GENPROP_LST != id_type) - goto error; - - /* Close future object for pre-defined type after realizing it */ - ret = H5Idec_ref(future_id); - CHECK(ret, FAIL, "H5Idec_ref"); - if (FAIL == ret) - goto error; - - return 0; - -error: - /* Cleanup. For simplicity, just destroy the types and ignore errors. */ - H5E_BEGIN_TRY - { - H5Idestroy_type(obj_type); - } - H5E_END_TRY - - return -1; -} /* end test_future_ids() */ -#endif - -void -test_ids(void) -{ - /* Set the random # seed */ - HDsrandom((unsigned)HDtime(NULL)); - - if (basic_id_test() < 0) - TestErrPrintf("Basic ID test failed\n"); - if (id_predefined_test() < 0) - TestErrPrintf("Predefined ID type test failed\n"); - if (test_is_valid() < 0) - TestErrPrintf("H5Iis_valid test failed\n"); - if (test_get_type() < 0) - TestErrPrintf("H5Iget_type test failed\n"); - if (test_id_type_list() < 0) - TestErrPrintf("ID type list test failed\n"); - if (test_remove_clear_type() < 0) - TestErrPrintf("ID remove during H5Iclear_type test failed\n"); -#if defined(H5VL_VERSION) && H5VL_VERSION >= 2 - if (test_future_ids() < 0) - TestErrPrintf("Future ID test failed\n"); -#endif -} diff --git a/test/API/titerate.c b/test/API/titerate.c deleted file mode 100644 index 5e6ce3626b4..00000000000 --- a/test/API/titerate.c +++ /dev/null @@ -1,1260 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: titerate - * - * Test the Group & Attribute functionality - * - *************************************************************/ - -#include "testhdf5.h" -/* #include "H5srcdir.h" */ - -#define DATAFILE "titerate.h5" - -/* Number of datasets for group iteration test */ -#define NDATASETS 50 - -/* Number of attributes for attribute iteration test */ -#define NATTR 50 - -/* Number of groups for second group iteration test */ -#define ITER_NGROUPS 150 - -/* General maximum length of names used */ -#define NAMELEN 80 - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -typedef enum { RET_ZERO, RET_TWO, RET_CHANGE, RET_CHANGE2 } iter_enum; - -/* Custom group iteration callback data */ -typedef struct { - char name[NAMELEN]; /* The name of the object */ - H5O_type_t type; /* The type of the object */ - iter_enum command; /* The type of return value */ -} iter_info; - -/* Definition for test_corrupted_attnamelen */ -#define CORRUPTED_ATNAMELEN_FILE "memleak_H5O_dtype_decode_helper_H5Odtype.h5" -#define DSET_NAME "image" -typedef struct searched_err_t { - char message[256]; - bool found; -} searched_err_t; -#if 0 -/* Call back function for test_corrupted_attnamelen */ -static int find_err_msg_cb(unsigned n, const H5E_error2_t *err_desc, void *_client_data); -#endif -/* Local functions */ -int iter_strcmp(const void *s1, const void *s2); -int iter_strcmp2(const void *s1, const void *s2); -static herr_t liter_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data); -static herr_t liter_cb2(hid_t group, const char *name, const H5L_info2_t *info, void *op_data); -herr_t aiter_cb(hid_t group, const char *name, const H5A_info_t *ainfo, void *op_data); - -/**************************************************************** -** -** iter_strcmp(): String comparison routine for qsort -** -****************************************************************/ -H5_ATTR_PURE int -iter_strcmp(const void *s1, const void *s2) -{ - return (strcmp(*(const char *const *)s1, *(const char *const *)s2)); -} - -/**************************************************************** -** -** liter_cb(): Custom link iteration callback routine. -** -****************************************************************/ -static herr_t -liter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link_info, - void *op_data) -{ - iter_info *info = (iter_info *)op_data; - static int count = 0; - static int count2 = 0; - - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { - SKIPPED(); - printf(" API functions for iterate aren't " - "supported with this connector\n"); - return 1; - } - - strcpy(info->name, name); - - switch (info->command) { - case RET_ZERO: - return (0); - - case RET_TWO: - return (2); - - case RET_CHANGE: - count++; - return (count > 10 ? 1 : 0); - - case RET_CHANGE2: - count2++; - return (count2 > 10 ? 
1 : 0); - - default: - printf("invalid iteration command"); - return (-1); - } /* end switch */ -} /* end liter_cb() */ - -/**************************************************************** -** -** test_iter_group(): Test group iteration functionality -** -****************************************************************/ -static void -test_iter_group(hid_t fapl, bool new_format) -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t datatype; /* Common datatype ID */ - hid_t filespace; /* Common dataspace ID */ - hid_t root_group, grp; /* Root group ID */ - int i; /* counting variable */ - hsize_t idx; /* Index in the group */ - char name[NAMELEN]; /* temporary name buffer */ - char *lnames[NDATASETS + 2]; /* Names of the links created */ - char dataset_name[NAMELEN]; /* dataset name */ - iter_info info; /* Custom iteration information */ - H5G_info_t ginfo; /* Buffer for querying object's info */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Group Iteration Functionality\n")); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) && (vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) && - (vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE)) { - /* Create the test file with the datasets */ - file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* Test iterating over empty group */ - info.command = RET_ZERO; - idx = 0; - ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); - VERIFY(ret, SUCCEED, "H5Literate2"); - - datatype = H5Tcopy(H5T_NATIVE_INT); - CHECK(datatype, FAIL, "H5Tcopy"); - - filespace = H5Screate(H5S_SCALAR); - CHECK(filespace, FAIL, "H5Screate"); - - for (i = 0; i < NDATASETS; i++) { - snprintf(name, sizeof(name), "Dataset %d", i); - dataset = H5Dcreate2(file, name, datatype, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Keep a copy of the dataset names around for later */ - lnames[i] = strdup(name); - CHECK_PTR(lnames[i], "strdup"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ - - /* Create a group and named datatype under root group for testing */ - grp = H5Gcreate2(file, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Gcreate2"); - - lnames[NDATASETS] = strdup("grp"); - CHECK_PTR(lnames[NDATASETS], "strdup"); - - ret = H5Tcommit2(file, "dtype", datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - lnames[NDATASETS + 1] = strdup("dtype"); - CHECK_PTR(lnames[NDATASETS], "strdup"); - - /* Close everything up */ - ret = H5Tclose(datatype); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Sclose(filespace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Sort the dataset names */ - qsort(lnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp); - - /* Iterate through the datasets in the root group in various ways */ - file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl); - CHECK(file, FAIL, "H5Fopen"); - - /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually - * iterate through B-tree for group members in internal library design. 
- */ - root_group = H5Gopen2(file, "/", H5P_DEFAULT); - CHECK(root_group, FAIL, "H5Gopen2"); - - ret = H5Gget_info(root_group, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, (NDATASETS + 2), "H5Gget_info"); - - for (i = 0; i < (int)ginfo.nlinks; i++) { - H5O_info2_t oinfo; /* Object info */ - - ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, - dataset_name, (size_t)NAMELEN, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_name_by_idx"); - - //! [H5Oget_info_by_idx3_snip] - - ret = H5Oget_info_by_idx3(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, - H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_idx"); - - //! [H5Oget_info_by_idx3_snip] - - } /* end for */ - - H5E_BEGIN_TRY - { - ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, - (hsize_t)(NDATASETS + 3), dataset_name, (size_t)NAMELEN, - H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Lget_name_by_idx"); - - ret = H5Gclose(root_group); - CHECK(ret, FAIL, "H5Gclose"); - - /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually - * iterate through B-tree for group members in internal library design. - * (Same as test above, but with the file ID instead of opening the root group) - */ - ret = H5Gget_info(file, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, NDATASETS + 2, "H5Gget_info"); - - for (i = 0; i < (int)ginfo.nlinks; i++) { - H5O_info2_t oinfo; /* Object info */ - - ret = (herr_t)H5Lget_name_by_idx(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, dataset_name, - (size_t)NAMELEN, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_name_by_idx"); - - ret = H5Oget_info_by_idx3(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, - H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_idx3"); - } /* end for */ - - H5E_BEGIN_TRY - { - ret = (herr_t)H5Lget_name_by_idx(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3), - dataset_name, (size_t)NAMELEN, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Lget_name_by_idx"); - - /* Test invalid indices for starting iteration */ - info.command = RET_ZERO; - idx = (hsize_t)-1; - H5E_BEGIN_TRY - { - ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Literate2"); - - /* Test skipping exactly as many entries as in the group */ - idx = NDATASETS + 2; - H5E_BEGIN_TRY - { - ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Literate2"); - - /* Test skipping more entries than are in the group */ - idx = NDATASETS + 3; - H5E_BEGIN_TRY - { - ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Literate2"); - - /* Test all objects in group, when callback always returns 0 */ - info.command = RET_ZERO; - idx = 0; - if ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) - TestErrPrintf("Group iteration function didn't return zero correctly!\n"); - - /* Test all objects in group, when callback always returns 1 */ - /* This also tests the "restarting" ability, because the index changes */ - info.command = RET_TWO; - i = 0; - idx = 0; - memset(info.name, 0, NAMELEN); - while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) { - /* Verify return value from iterator gets propagated correctly */ - VERIFY(ret, 2, "H5Literate2"); - - /* Increment the number of times "2" is 
returned */ - i++; - - /* Verify that the index is the correct value */ - VERIFY(idx, (hsize_t)i, "H5Literate2"); - if (idx != (hsize_t)i) - break; - if (idx > (NDATASETS + 2)) - TestErrPrintf("Group iteration function walked too far!\n"); - - /* Verify that the correct name is retrieved */ - if (strncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0) - TestErrPrintf( - "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", - (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); - } /* end while */ - VERIFY(ret, -1, "H5Literate2"); - - if (i != (NDATASETS + 2)) - TestErrPrintf("%u: Group iteration function didn't perform multiple iterations correctly!\n", - __LINE__); - - /* Test all objects in group, when callback changes return value */ - /* This also tests the "restarting" ability, because the index changes */ - info.command = new_format ? RET_CHANGE2 : RET_CHANGE; - i = 0; - idx = 0; - memset(info.name, 0, NAMELEN); - while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) >= 0) { - /* Verify return value from iterator gets propagated correctly */ - VERIFY(ret, 1, "H5Literate2"); - - /* Increment the number of times "1" is returned */ - i++; - - /* Verify that the index is the correct value */ - VERIFY(idx, (hsize_t)(i + 10), "H5Literate2"); - if (idx != (hsize_t)(i + 10)) - break; - if (idx > (NDATASETS + 2)) - TestErrPrintf("Group iteration function walked too far!\n"); - - /* Verify that the correct name is retrieved */ - if (strncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0) - TestErrPrintf( - "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", - (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); - } /* end while */ - VERIFY(ret, -1, "H5Literate2"); - - if (i != 42 || idx != 52) - TestErrPrintf("%u: Group iteration function didn't perform multiple iterations correctly!\n", - __LINE__); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free the dataset names */ - for (i = 0; i < (NDATASETS + 2); i++) - free(lnames[i]); - } -} /* test_iter_group() */ - -/**************************************************************** -** -** aiter_cb(): Custom group iteration callback routine. -** -****************************************************************/ -herr_t -aiter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *op_data) -{ - iter_info *info = (iter_info *)op_data; - static int count = 0; - static int count2 = 0; - - strcpy(info->name, name); - - switch (info->command) { - case RET_ZERO: - return (0); - - case RET_TWO: - return (2); - - case RET_CHANGE: - count++; - return (count > 10 ? 1 : 0); - - case RET_CHANGE2: - count2++; - return (count2 > 10 ? 
1 : 0); - - default: - printf("invalid iteration command"); - return (-1); - } /* end switch */ -} /* end aiter_cb() */ - -/**************************************************************** -** -** test_iter_attr(): Test attribute iteration functionality -** -****************************************************************/ -static void -test_iter_attr(hid_t fapl, bool new_format) -{ - hid_t file; /* File ID */ - hid_t dataset; /* Common Dataset ID */ - hid_t filespace; /* Common dataspace ID */ - hid_t attribute; /* Attribute ID */ - int i; /* counting variable */ - hsize_t idx; /* Index in the attribute list */ - char name[NAMELEN]; /* temporary name buffer */ - char *anames[NATTR]; /* Names of the attributes created */ - iter_info info; /* Custom iteration information */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Attribute Iteration Functionality\n")); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { - memset(&info, 0, sizeof(iter_info)); - - /* Create the test file with the datasets */ - file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - filespace = H5Screate(H5S_SCALAR); - CHECK(filespace, FAIL, "H5Screate"); - - dataset = - H5Dcreate2(file, "Dataset", H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - for (i = 0; i < NATTR; i++) { - snprintf(name, sizeof(name), "Attribute %02d", i); - attribute = H5Acreate2(dataset, name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attribute, FAIL, "H5Acreate2"); - - /* Keep a copy of the attribute names around for later */ - anames[i] = strdup(name); - CHECK_PTR(anames[i], "strdup"); - - ret = H5Aclose(attribute); - CHECK(ret, FAIL, "H5Aclose"); - } /* end for */ - - /* Close everything up */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(filespace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Iterate through the attributes on the dataset in various ways */ - file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl); - CHECK(file, FAIL, "H5Fopen"); - - dataset = H5Dopen2(file, "Dataset", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Test invalid indices for starting iteration */ - info.command = RET_ZERO; - - /* Test skipping exactly as many attributes as there are */ - idx = NATTR; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); - - /* Test skipping more attributes than there are */ - idx = NATTR + 1; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); - - /* Test all attributes on dataset, when callback always returns 0 */ - info.command = RET_ZERO; - idx = 0; - if ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) - TestErrPrintf("Attribute iteration function didn't return zero correctly!\n"); - - /* Test all attributes on dataset, when callback always returns 2 */ - /* This also tests the "restarting" ability, because the index changes */ - info.command = RET_TWO; - i = 0; - idx = 0; - while ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) { - 
/* Verify return value from iterator gets propagated correctly */ - VERIFY(ret, 2, "H5Aiterate2"); - - /* Increment the number of times "2" is returned */ - i++; - - /* Verify that the index is the correct value */ - VERIFY(idx, (unsigned)i, "H5Aiterate2"); - - /* Don't check name when new format is used */ - if (!new_format) { - /* Verify that the correct name is retrieved */ - if (idx > 0) { - if (strcmp(info.name, anames[(size_t)idx - 1]) != 0) - TestErrPrintf( - "%u: Attribute iteration function didn't set names correctly, info.name = " - "'%s', anames[%u] = '%s'!\n", - __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); - } /* end if */ - else - TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__); - } /* end if */ - } /* end while */ - VERIFY(ret, -1, "H5Aiterate2"); - if (i != 50 || idx != 50) - TestErrPrintf("%u: Attribute iteration function didn't perform multiple iterations correctly!\n", - __LINE__); - - /* Test all attributes on dataset, when callback changes return value */ - /* This also tests the "restarting" ability, because the index changes */ - info.command = new_format ? RET_CHANGE2 : RET_CHANGE; - i = 0; - idx = 0; - while ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) { - /* Verify return value from iterator gets propagated correctly */ - VERIFY(ret, 1, "H5Aiterate2"); - - /* Increment the number of times "1" is returned */ - i++; - - /* Verify that the index is the correct value */ - VERIFY(idx, (unsigned)i + 10, "H5Aiterate2"); - - /* Don't check name when new format is used */ - if (!new_format) { - /* Verify that the correct name is retrieved */ - if (idx > 0) { - if (strcmp(info.name, anames[(size_t)idx - 1]) != 0) - TestErrPrintf( - "%u: Attribute iteration function didn't set names correctly, info.name = " - "'%s', anames[%u] = '%s'!\n", - __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); - } - else - TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__); - } /* end if */ - } /* end while */ - VERIFY(ret, -1, "H5Aiterate2"); - if (i != 40 || idx != 50) - TestErrPrintf("%u: Attribute iteration function didn't perform multiple iterations correctly!\n", - __LINE__); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Free the attribute names */ - for (i = 0; i < NATTR; i++) - free(anames[i]); - } -} /* test_iter_attr() */ - -/**************************************************************** -** -** iter_strcmp2(): String comparison routine for qsort -** -****************************************************************/ -H5_ATTR_PURE int -iter_strcmp2(const void *s1, const void *s2) -{ - return (strcmp((const char *)s1, (const char *)s2)); -} /* end iter_strcmp2() */ - -/**************************************************************** -** -** liter_cb2(): Custom link iteration callback routine. 
-** -****************************************************************/ -static herr_t -liter_cb2(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link_info, void *opdata) -{ - const iter_info *test_info = (const iter_info *)opdata; - H5O_info2_t oinfo; - herr_t ret; /* Generic return value */ - - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { - SKIPPED(); - printf(" API functions for iterate and basic links aren't " - "supported with this connector\n"); - return 1; - } - - if (strcmp(name, test_info->name) != 0) { - TestErrPrintf("name = '%s', test_info = '%s'\n", name, test_info->name); - return (H5_ITER_ERROR); - } /* end if */ - - /* - * Get type of the object and check it. - */ - ret = H5Oget_info_by_name3(loc_id, name, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - - if (test_info->type != oinfo.type) { - TestErrPrintf("test_info->type = %d, oinfo.type = %d\n", test_info->type, (int)oinfo.type); - return (H5_ITER_ERROR); - } /* end if */ - - return (H5_ITER_STOP); -} /* liter_cb2() */ - -/**************************************************************** -** -** test_iter_group_large(): Test group iteration functionality -** for groups with large #'s of objects -** -****************************************************************/ -static void -test_iter_group_large(hid_t fapl) -{ - hid_t file; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[] = {SPACE1_DIM1}; - herr_t ret; /* Generic return value */ - char gname[20]; /* Temporary group name */ - iter_info *names; /* Names of objects in the root group */ - iter_info *curr_name; /* Pointer to the current name in the root group */ - int i; - - /* Compound datatype */ - typedef struct s1_t { - unsigned int a; - unsigned int b; - float c; - } s1_t; - - /* Allocate & initialize array */ - names = (iter_info *)calloc(sizeof(iter_info), (ITER_NGROUPS + 2)); - CHECK_PTR(names, "calloc"); - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Large Group Iteration Functionality\n")); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { - /* Create file */ - file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(SPACE1_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create a bunch of groups */ - for (i = 0; i < ITER_NGROUPS; i++) { - snprintf(gname, sizeof(gname), "Group_%d", i); - - /* Add the name to the list of objects in the root group */ - strcpy(names[i].name, gname); - names[i].type = H5O_TYPE_GROUP; - - /* Create a group */ - group = H5Gcreate2(file, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, FAIL, "H5Gcreate2"); - - /* Close a group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - } /* end for */ - - /* Create a dataset */ - dataset = H5Dcreate2(file, "Dataset1", H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Add the name to the list of objects in the root group */ - strcpy(names[ITER_NGROUPS].name, "Dataset1"); - names[ITER_NGROUPS].type = H5O_TYPE_DATASET; - - /* Close Dataset */ - ret = 
H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close Dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create a datatype */ - tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(file, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Add the name to the list of objects in the root group */ - strcpy(names[ITER_NGROUPS + 1].name, "Datatype1"); - names[ITER_NGROUPS + 1].type = H5O_TYPE_NAMED_DATATYPE; - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Need to sort the names in the root group, cause that's what the library does */ - qsort(names, (size_t)(ITER_NGROUPS + 2), sizeof(iter_info), iter_strcmp2); - - /* Iterate through the file to see members of the root group */ - curr_name = &names[0]; - ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, NULL, liter_cb2, curr_name); - CHECK(ret, FAIL, "H5Literate2"); - for (i = 1; i < 100; i++) { - hsize_t idx = (hsize_t)i; - - curr_name = &names[i]; - ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb2, curr_name); - CHECK(ret, FAIL, "H5Literate2"); - } /* end for */ - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Release memory */ - free(names); - } -} /* test_iterate_group_large() */ - -/**************************************************************** -** -** test_grp_memb_funcs(): Test group member information -** functionality -** -****************************************************************/ -static void -test_grp_memb_funcs(hid_t fapl) -{ - hid_t file; /* File ID */ - hid_t dataset; /* Dataset ID */ - hid_t datatype; /* Common datatype ID */ - hid_t filespace; /* Common dataspace ID */ - hid_t root_group, grp; /* Root group ID */ - int i; /* counting variable */ - char name[NAMELEN]; /* temporary name buffer */ - char *dnames[NDATASETS + 2]; /* Names of the datasets created */ - char *obj_names[NDATASETS + 2]; /* Names of the objects in group */ - char dataset_name[NAMELEN]; /* dataset name */ - ssize_t name_len; /* Length of object's name */ - H5G_info_t ginfo; /* Buffer for querying object's info */ - herr_t ret = SUCCEED; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Group Member Information Functionality\n")); - - /* Create the test file with the datasets */ - file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - datatype = H5Tcopy(H5T_NATIVE_INT); - CHECK(datatype, FAIL, "H5Tcopy"); - - filespace = H5Screate(H5S_SCALAR); - CHECK(filespace, FAIL, "H5Screate"); - - for (i = 0; i < NDATASETS; i++) { - snprintf(name, sizeof(name), "Dataset %d", i); - dataset = H5Dcreate2(file, name, datatype, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Keep a copy of the dataset names around for later */ - dnames[i] = strdup(name); - CHECK_PTR(dnames[i], "strdup"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ - - /* Create a group and named datatype under root group for testing */ - grp = H5Gcreate2(file, 
"grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Gcreate2"); - - dnames[NDATASETS] = strdup("grp"); - CHECK_PTR(dnames[NDATASETS], "strdup"); - - ret = H5Tcommit2(file, "dtype", datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - dnames[NDATASETS + 1] = strdup("dtype"); - CHECK_PTR(dnames[NDATASETS], "strdup"); - - /* Close everything up */ - ret = H5Tclose(datatype); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Gclose(grp); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Sclose(filespace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Sort the dataset names */ - qsort(dnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp); - - /* Iterate through the datasets in the root group in various ways */ - file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl); - CHECK(file, FAIL, "H5Fopen"); - - /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually - * iterate through B-tree for group members in internal library design. - */ - root_group = H5Gopen2(file, "/", H5P_DEFAULT); - CHECK(root_group, FAIL, "H5Gopen2"); - - ret = H5Gget_info(root_group, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, (NDATASETS + 2), "H5Gget_info"); - - for (i = 0; i < (int)ginfo.nlinks; i++) { - H5O_info2_t oinfo; /* Object info */ - - /* Test with NULL for name, to query length */ - name_len = H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, NULL, - (size_t)NAMELEN, H5P_DEFAULT); - CHECK(name_len, FAIL, "H5Lget_name_by_idx"); - - ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, - dataset_name, (size_t)(name_len + 1), H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_name_by_idx"); - - /* Double-check that the length is the same */ - VERIFY(ret, name_len, "H5Lget_name_by_idx"); - - /* Keep a copy of the dataset names around for later */ - obj_names[i] = strdup(dataset_name); - CHECK_PTR(obj_names[i], "strdup"); - - ret = H5Oget_info_by_idx3(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, - H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_idx3"); - - if (!strcmp(dataset_name, "grp")) - VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); - if (!strcmp(dataset_name, "dtype")) - VERIFY(oinfo.type, H5O_TYPE_NAMED_DATATYPE, "H5Lget_name_by_idx"); - if (!strncmp(dataset_name, "Dataset", (size_t)7)) - VERIFY(oinfo.type, H5O_TYPE_DATASET, "H5Lget_name_by_idx"); - } /* end for */ - - H5E_BEGIN_TRY - { - ret = - (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3), - dataset_name, (size_t)NAMELEN, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Lget_name_by_idx"); - - /* Sort the dataset names */ - qsort(obj_names, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp); - - /* Compare object names */ - for (i = 0; i < (int)ginfo.nlinks; i++) { - ret = strcmp(dnames[i], obj_names[i]); - VERIFY(ret, 0, "strcmp"); - } /* end for */ - - ret = H5Gclose(root_group); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free the dataset names */ - for (i = 0; i < (NDATASETS + 2); i++) { - free(dnames[i]); - free(obj_names[i]); - } /* end for */ -} /* test_grp_memb_funcs() */ - -/**************************************************************** -** -** test_links(): Test soft and hard link iteration -** -****************************************************************/ -static void -test_links(hid_t fapl) -{ - 
hid_t file; /* File ID */ - char obj_name[NAMELEN]; /* Names of the object in group */ - ssize_t name_len; /* Length of object's name */ - hid_t gid, gid1; - H5G_info_t ginfo; /* Buffer for querying object's info */ - hsize_t i; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Soft and Hard Link Iteration Functionality\n")); - - /* Create the test file with the datasets */ - file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* create groups */ - gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - gid1 = H5Gcreate2(file, "/g1/g1.1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - - /* create soft and hard links to the group "/g1". */ - ret = H5Lcreate_soft("something", gid, "softlink", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - - ret = H5Lcreate_hard(gid, "/g1", H5L_SAME_LOC, "hardlink", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - - ret = H5Gget_info(gid, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 3, "H5Gget_info"); - - /* Test these two functions, H5Oget_info_by_idx and H5Lget_name_by_idx */ - for (i = 0; i < ginfo.nlinks; i++) { - H5O_info2_t oinfo; /* Object info */ - H5L_info2_t linfo; /* Link info */ - - /* Get link name */ - name_len = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, i, obj_name, (size_t)NAMELEN, - H5P_DEFAULT); - CHECK(name_len, FAIL, "H5Lget_name_by_idx"); - - /* Get link type */ - ret = H5Lget_info_by_idx2(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &linfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info_by_idx2"); - - /* Get object type */ - if (linfo.type == H5L_TYPE_HARD) { - ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, - H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_idx3"); - } /* end if */ - - if (!strcmp(obj_name, "g1.1")) - VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); - else if (!strcmp(obj_name, "hardlink")) - VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); - else if (!strcmp(obj_name, "softlink")) - VERIFY(linfo.type, H5L_TYPE_SOFT, "H5Lget_name_by_idx"); - else - ERROR("unknown object name"); - } /* end for */ - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_links() */ - -/*------------------------------------------------------------------------- - * Function: find_err_msg_cb - * - * Purpose: Callback function to find the given error message. - * Helper function for test_corrupted_attnamelen(). 
- * - * Return: H5_ITER_STOP when the message is found - * H5_ITER_CONT, otherwise - * - *------------------------------------------------------------------------- - */ -#if 0 -static int -find_err_msg_cb(unsigned H5_ATTR_UNUSED n, const H5E_error2_t *err_desc, void *_client_data) -{ - int status = H5_ITER_CONT; - searched_err_t *searched_err = (searched_err_t *)_client_data; - - if (searched_err == NULL) - return H5_ITER_ERROR; - - /* If the searched error message is found, stop the iteration */ - if (err_desc->desc != NULL && strcmp(err_desc->desc, searched_err->message) == 0) { - searched_err->found = true; - status = H5_ITER_STOP; - } - - return status; -} /* end find_err_msg_cb() */ -#endif - -/************************************************************************** -** -** test_corrupted_attnamelen(): Test the fix for the JIRA issue HDFFV-10588, -** where corrupted attribute's name length can be -** detected and invalid read can be avoided. -** -**************************************************************************/ -#if 0 -static void -test_corrupted_attnamelen(void) -{ - hid_t fid = -1; /* File ID */ - hid_t did = -1; /* Dataset ID */ - searched_err_t err_caught; /* Data to be passed to callback func */ - int err_status; /* Status returned by H5Aiterate2 */ - herr_t ret; /* Return value */ - bool driver_is_default_compatible; - const char *testfile = H5_get_srcdir_filename(CORRUPTED_ATNAMELEN_FILE); /* Corrected test file name */ - - const char *err_message = "attribute name has different length than stored length"; - /* the error message produced when the failure occurs */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing the Handling of Corrupted Attribute's Name Length\n")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - did = H5Dopen2(fid, DSET_NAME, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Call H5Aiterate2 to trigger the failure in HDFFV-10588. Failure should - occur in the decoding stage, so some arguments are not needed. 
*/ - err_status = H5Aiterate2(did, H5_INDEX_NAME, H5_ITER_INC, NULL, NULL, NULL); - VERIFY(err_status, FAIL, "H5Aiterate2"); - - /* Make sure the intended error was caught */ - if (err_status == -1) { - /* Initialize client data */ - strcpy(err_caught.message, err_message); - err_caught.found = false; - - /* Look for the correct error message */ - ret = H5Ewalk2(H5E_DEFAULT, H5E_WALK_UPWARD, find_err_msg_cb, &err_caught); - CHECK(ret, FAIL, "H5Ewalk2"); - - /* Fail if the indicated message is not found */ - CHECK(err_caught.found, false, "test_corrupted_attnamelen: Expected error not found"); - } - - /* Close the dataset and file */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_corrupted_attnamelen() */ -#endif - -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS -/**************************************************************** -** -** test_links_deprec(): Test soft and hard link iteration -** -****************************************************************/ -static void -test_links_deprec(hid_t fapl) -{ - hid_t file; /* File ID */ - char obj_name[NAMELEN]; /* Names of the object in group */ - ssize_t name_len; /* Length of object's name */ - hid_t gid, gid1; - H5G_info_t ginfo; /* Buffer for querying object's info */ - hsize_t i; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Soft and Hard Link Iteration Functionality Using Deprecated Routines\n")); - - /* Create the test file with the datasets */ - file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - /* create groups */ - gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - gid1 = H5Gcreate2(file, "/g1/g1.1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - - /* create soft and hard links to the group "/g1". 
*/ - ret = H5Lcreate_soft("something", gid, "softlink", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - - ret = H5Lcreate_hard(gid, "/g1", H5L_SAME_LOC, "hardlink", H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - - ret = H5Gget_info(gid, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 3, "H5Gget_info"); - - /* Test these two functions, H5Oget_info_by_idx and H5Lget_name_by_idx */ - for (i = 0; i < ginfo.nlinks; i++) { - H5O_info2_t oinfo; /* Object info */ - H5L_info2_t linfo; /* Link info */ - - /* Get link name */ - name_len = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, i, obj_name, (size_t)NAMELEN, - H5P_DEFAULT); - CHECK(name_len, FAIL, "H5Lget_name_by_idx"); - - /* Get link type */ - ret = H5Lget_info_by_idx2(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &linfo, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lget_info_by_idx1"); - - /* Get object type */ - if (linfo.type == H5L_TYPE_HARD) { - ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, - H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_idx"); - } /* end if */ - - if (!strcmp(obj_name, "g1.1")) - VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); - else if (!strcmp(obj_name, "hardlink")) - VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx"); - else if (!strcmp(obj_name, "softlink")) - VERIFY(linfo.type, H5L_TYPE_SOFT, "H5Lget_name_by_idx"); - else - ERROR("unknown object name"); - } /* end for */ - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_links_deprec() */ -#endif -#endif - -/**************************************************************** -** -** test_iterate(): Main iteration testing routine. -** -****************************************************************/ -void -test_iterate(void) -{ - hid_t fapl, fapl2; /* File access property lists */ - unsigned new_format; /* Whether to use the new format or not */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Iteration Operations\n")); - - /* Get the default FAPL */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Copy the file access property list */ - fapl2 = H5Pcopy(fapl); - CHECK(fapl2, FAIL, "H5Pcopy"); - - /* Set the "use the latest version of the format" bounds for creating objects in the file */ - ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* These next tests use the same file */ - for (new_format = false; new_format <= true; new_format++) { - test_iter_group(new_format ? fapl2 : fapl, new_format); /* Test group iteration */ - test_iter_group_large(new_format ? fapl2 : fapl); /* Test group iteration for large # of objects */ - test_iter_attr(new_format ? fapl2 : fapl, new_format); /* Test attribute iteration */ - test_grp_memb_funcs(new_format ? fapl2 : fapl); /* Test group member information functions */ - test_links(new_format ? fapl2 : fapl); /* Test soft and hard link iteration */ -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - test_links_deprec(new_format ? 
fapl2 : fapl); /* Test soft and hard link iteration */ -#endif -#endif - } /* end for */ -#if 0 - /* Test the fix for issue HDFFV-10588 */ - test_corrupted_attnamelen(); -#endif - /* Close FAPLs */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(fapl2); - CHECK(ret, FAIL, "H5Pclose"); -} /* test_iterate() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_iterate - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_iterate(void) -{ - H5Fdelete(DATAFILE, H5P_DEFAULT); -} diff --git a/test/API/tmisc.c b/test/API/tmisc.c deleted file mode 100644 index 4c87425773c..00000000000 --- a/test/API/tmisc.c +++ /dev/null @@ -1,6343 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tmisc - * - * Test miscellaneous features not tested elsewhere. Generally - * regression tests for bugs that are reported and don't - * have an existing test to add them to. - * - *************************************************************/ - -#define H5D_FRIEND /*suppress error about including H5Dpkg */ - -/* Define this macro to indicate that the testing APIs should be available */ -#define H5D_TESTING - -#include "testhdf5.h" -/* #include "H5srcdir.h" */ -/* #include "H5Dpkg.h" */ /* Datasets */ -/* #include "H5MMprivate.h" */ /* Memory */ - -/* Definitions for misc. test #1 */ -#define MISC1_FILE "tmisc1.h5" -#define MISC1_VAL (13417386) /* 0xccbbaa */ -#define MISC1_VAL2 (15654348) /* 0xeeddcc */ -#define MISC1_DSET_NAME "/scalar_set" - -/* Definitions for misc. test #2 */ -#define MISC2_FILE_1 "tmisc2a.h5" -#define MISC2_FILE_2 "tmisc2b.h5" -#define MISC2_ATT_NAME_1 "scalar_att_1" -#define MISC2_ATT_NAME_2 "scalar_att_2" - -typedef struct { - char *string; -} misc2_struct; - -/* Definitions for misc. test #3 */ -#define MISC3_FILE "tmisc3.h5" -#define MISC3_RANK 2 -#define MISC3_DIM1 6 -#define MISC3_DIM2 6 -#define MISC3_CHUNK_DIM1 2 -#define MISC3_CHUNK_DIM2 2 -#define MISC3_FILL_VALUE 2 -#define MISC3_DSET_NAME "/chunked" - -/* Definitions for misc. test #4 */ -#define MISC4_FILE_1 "tmisc4a.h5" -#define MISC4_FILE_2 "tmisc4b.h5" -#define MISC4_GROUP_1 "/Group1" -#define MISC4_GROUP_2 "/Group2" - -/* Definitions for misc. 
test #5 */ -#define MISC5_FILE "tmisc5.h5" -#define MISC5_DSETNAME "dset1" -#define MISC5_DSETRANK 1 -#define MISC5_NELMTOPLVL 1 -#define MISC5_DBGNELM1 2 -#define MISC5_DBGNELM2 1 -#define MISC5_DBGNELM3 1 -#define MISC5_DBGELVAL1 999999999 -#define MISC5_DBGELVAL2 888888888 -#define MISC5_DBGELVAL3 777777777 - -typedef struct { - int st1_el1; - hvl_t st1_el2; -} misc5_struct1; - -typedef struct { - int st2_el1; - hvl_t st2_el2; -} misc5_struct2; - -typedef struct { - int st3_el1; -} misc5_struct3; - -typedef struct { - hid_t st3h_base; - hid_t st3h_id; -} misc5_struct3_hndl; - -typedef struct { - hid_t st2h_base; - hid_t st2h_id; - misc5_struct3_hndl *st2h_st3hndl; -} misc5_struct2_hndl; - -typedef struct { - hid_t st1h_base; - hid_t st1h_id; - misc5_struct2_hndl *st1h_st2hndl; -} misc5_struct1_hndl; - -/* Definitions for misc. test #6 */ -#define MISC6_FILE "tmisc6.h5" -#define MISC6_DSETNAME1 "dset1" -#define MISC6_DSETNAME2 "dset2" -#define MISC6_NUMATTR 16 - -/* Definitions for misc. test #7 */ -#define MISC7_FILE "tmisc7.h5" -#define MISC7_DSETNAME1 "Dataset1" -#define MISC7_DSETNAME2 "Dataset2" -#define MISC7_TYPENAME1 "Datatype1" -#define MISC7_TYPENAME2 "Datatype2" - -/* Definitions for misc. test #8 */ -#define MISC8_FILE "tmisc8.h5" -#define MISC8_DSETNAME1 "Dataset1" -#define MISC8_DSETNAME4 "Dataset4" -#define MISC8_DSETNAME5 "Dataset5" -#define MISC8_DSETNAME8 "Dataset8" - -#ifndef H5_HAVE_PARALLEL -#define MISC8_DSETNAME2 "Dataset2" -#define MISC8_DSETNAME3 "Dataset3" -#define MISC8_DSETNAME6 "Dataset6" -#define MISC8_DSETNAME7 "Dataset7" -#define MISC8_DSETNAME9 "Dataset9" -#define MISC8_DSETNAME10 "Dataset10" -#endif - -#define MISC8_RANK 2 -#define MISC8_DIM0 50 -#define MISC8_DIM1 50 -#define MISC8_CHUNK_DIM0 10 -#define MISC8_CHUNK_DIM1 10 - -/* Definitions for misc. test #9 */ -#define MISC9_FILE "tmisc9.h5" - -/* Definitions for misc. test #10 */ -#define MISC10_FILE_OLD "tmtimeo.h5" -#define MISC10_FILE_NEW "tmisc10.h5" -#define MISC10_DSETNAME "Dataset1" - -/* Definitions for misc. test #11 */ -#define MISC11_FILE "tmisc11.h5" -#define MISC11_USERBLOCK 1024 -#define MISC11_SIZEOF_OFF 4 -#define MISC11_SIZEOF_LEN 4 -#define MISC11_SYM_LK 8 -#define MISC11_SYM_IK 32 -#define MISC11_ISTORE_IK 64 -#define MISC11_NINDEXES 1 - -/* Definitions for misc. test #12 */ -#define MISC12_FILE "tmisc12.h5" -#define MISC12_DSET_NAME "Dataset" -#define MISC12_SPACE1_RANK 1 -#define MISC12_SPACE1_DIM1 4 -#define MISC12_CHUNK_SIZE 2 -#define MISC12_APPEND_SIZE 5 - -/* Definitions for misc. test #13 */ -#define MISC13_FILE_1 "tmisc13a.h5" -#define MISC13_FILE_2 "tmisc13b.h5" -#define MISC13_DSET1_NAME "Dataset1" -#define MISC13_DSET2_NAME "Dataset2" -#define MISC13_DSET3_NAME "Dataset3" -#define MISC13_GROUP1_NAME "Group1" -#define MISC13_GROUP2_NAME "Group2" -#define MISC13_DTYPE_NAME "Datatype" -#define MISC13_RANK 1 -#define MISC13_DIM1 600 -#define MISC13_CHUNK_DIM1 10 -#define MISC13_USERBLOCK_SIZE 512 -#define MISC13_COPY_BUF_SIZE 4096 - -/* Definitions for misc. test #14 */ -#define MISC14_FILE "tmisc14.h5" -#define MISC14_DSET1_NAME "Dataset1" -#define MISC14_DSET2_NAME "Dataset2" -#define MISC14_DSET3_NAME "Dataset3" -#define MISC14_METADATA_SIZE 4096 - -/* Definitions for misc. test #15 */ -#define MISC15_FILE "tmisc15.h5" -#define MISC15_BUF_SIZE 1024 - -/* Definitions for misc. test #16 */ -#define MISC16_FILE "tmisc16.h5" -#define MISC16_SPACE_DIM 4 -#define MISC16_SPACE_RANK 1 -#define MISC16_STR_SIZE 8 -#define MISC16_DSET_NAME "Dataset" - -/* Definitions for misc. 
test #17 */ -#define MISC17_FILE "tmisc17.h5" -#define MISC17_SPACE_RANK 2 -#define MISC17_SPACE_DIM1 4 -#define MISC17_SPACE_DIM2 8 -#define MISC17_DSET_NAME "Dataset" - -/* Definitions for misc. test #18 */ -#define MISC18_FILE "tmisc18.h5" -#define MISC18_DSET1_NAME "Dataset1" -#define MISC18_DSET2_NAME "Dataset2" - -/* Definitions for misc. test #19 */ -#define MISC19_FILE "tmisc19.h5" -#define MISC19_DSET_NAME "Dataset" -#define MISC19_ATTR_NAME "Attribute" -#define MISC19_GROUP_NAME "Group" - -/* Definitions for misc. test #20 */ -#define MISC20_FILE "tmisc20.h5" -#define MISC20_FILE_OLD "tlayouto.h5" -#define MISC20_DSET_NAME "Dataset" -#define MISC20_DSET2_NAME "Dataset2" -#define MISC20_SPACE_RANK 2 -/* Make sure the product of the following 2 does not get too close to */ -/* 64 bits, risking an overflow. */ -#define MISC20_SPACE_DIM0 (8 * 1024 * 1024 * (uint64_t)1024) -#define MISC20_SPACE_DIM1 ((256 * 1024 * (uint64_t)1024) + 1) -#define MISC20_SPACE2_DIM0 8 -#define MISC20_SPACE2_DIM1 4 - -#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) -/* Definitions for misc. test #21 */ -#define MISC21_FILE "tmisc21.h5" -#define MISC21_DSET_NAME "Dataset" -#define MISC21_SPACE_RANK 2 -#define MISC21_SPACE_DIM0 7639 -#define MISC21_SPACE_DIM1 6308 -#define MISC21_CHUNK_DIM0 2048 -#define MISC21_CHUNK_DIM1 2048 - -/* Definitions for misc. test #22 */ -#define MISC22_FILE "tmisc22.h5" -#define MISC22_DSET_NAME "Dataset" -#define MISC22_SPACE_RANK 2 -#define MISC22_CHUNK_DIM0 512 -#define MISC22_CHUNK_DIM1 512 -#define MISC22_SPACE_DIM0 639 -#define MISC22_SPACE_DIM1 1308 -#endif /* H5_HAVE_FILTER_SZIP */ - -/* Definitions for misc. test #23 */ -#define MISC23_FILE "tmisc23.h5" -#define MISC23_NAME_BUF_SIZE 40 - -/* Definitions for misc. test #24 */ -#define MISC24_FILE "tmisc24.h5" -#define MISC24_GROUP_NAME "group" -#define MISC24_GROUP_LINK "group_link" -#define MISC24_DATASET_NAME "dataset" -#define MISC24_DATASET_LINK "dataset_link" -#define MISC24_DATATYPE_NAME "datatype" -#define MISC24_DATATYPE_LINK "datatype_link" - -/* Definitions for misc. test #25 'a', 'b' & 'c' */ -#define MISC25A_FILE "foo.h5" -#define MISC25A_GROUP0_NAME "grp0" -#define MISC25A_GROUP1_NAME "/grp0/grp1" -#define MISC25A_GROUP2_NAME "/grp0/grp2" -#define MISC25A_GROUP3_NAME "/grp0/grp3" -#define MISC25A_ATTR1_NAME "_long attribute_" -#define MISC25A_ATTR1_LEN 11 -#define MISC25A_ATTR2_NAME "_short attr__" -#define MISC25A_ATTR2_LEN 11 -#define MISC25A_ATTR3_NAME "_short attr__" -#define MISC25A_ATTR3_LEN 1 -#define MISC25B_FILE "mergemsg.h5" -#define MISC25B_GROUP "grp1" -#define MISC25C_FILE "nc4_rename.h5" -#define MISC25C_DSETNAME "da" -#define MISC25C_DSETNAME2 "dz" -#define MISC25C_DSETGRPNAME "ga" -#define MISC25C_GRPNAME "gb" -#define MISC25C_GRPNAME2 "gc" -#define MISC25C_ATTRNAME "aa" -#define MISC25C_ATTRNAME2 "ab" - -/* Definitions for misc. test #26 */ -#define MISC26_FILE "dcpl_file" - -/* Definitions for misc. test #27 */ -/* (Note that this test file is generated by the "gen_bad_ohdr.c" code) */ -#define MISC27_FILE "tbad_msg_count.h5" -#define MISC27_GROUP "Group" - -/* Definitions for misc. test #28 */ -#define MISC28_FILE "tmisc28.h5" -#define MISC28_SIZE 10 -#define MISC28_NSLOTS 10000 - -/* Definitions for misc. test #29 */ -#define MISC29_ORIG_FILE "specmetaread.h5" -#define MISC29_COPY_FILE "tmisc29.h5" -#define MISC29_DSETNAME "dset2" - -/* Definitions for misc. test #30 */ -#define MISC30_FILE "tmisc30.h5" - -#ifndef H5_NO_DEPRECATED_SYMBOLS -/* Definitions for misc. 
test #31 */ -#define MISC31_FILE "tmisc31.h5" -#define MISC31_DSETNAME "dset" -#define MISC31_ATTRNAME1 "attr1" -#define MISC31_ATTRNAME2 "attr2" -#define MISC31_GROUPNAME "group" -#define MISC31_PROPNAME "misc31_prop" -#define MISC31_DTYPENAME "dtype" -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - -/* Definitions for misc. test #33 */ -/* Note that this test file is generated by "gen_bad_offset.c" */ -/* and bad offset values are written to that file for testing */ -#define MISC33_FILE "bad_offset.h5" - -/* Definitions for misc. test #35 */ -#define MISC35_SPACE_RANK 3 -#define MISC35_SPACE_DIM1 3 -#define MISC35_SPACE_DIM2 15 -#define MISC35_SPACE_DIM3 13 -#define MISC35_NPOINTS 10 - -/* Definitions for misc. test #37 */ -/* The test file is formerly named h5_nrefs_POC. - See https://nvd.nist.gov/vuln/detail/CVE-2020-10812 */ -#define CVE_2020_10812_FILENAME "cve_2020_10812.h5" - -#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) -/*------------------------------------------------------------------------- - * Function: h5_szip_can_encode - * - * Purpose: Retrieve the filter config flags for szip, tell if - * encoder is available. - * - * Return: 1: decode+encode is enabled - * 0: only decode is enabled - * -1: other - *------------------------------------------------------------------------- - */ -int -h5_szip_can_encode(void) -{ - unsigned int filter_config_flags; - - H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags); - if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) { - /* filter present but neither encode nor decode is supported (???) */ - return -1; - } - else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == - H5Z_FILTER_CONFIG_DECODE_ENABLED) { - /* decoder only: read but not write */ - return 0; - } - else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == - H5Z_FILTER_CONFIG_ENCODE_ENABLED) { - /* encoder only: write but not read (???) */ - return -1; - } - else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == - (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) { - return 1; - } - return (-1); -} -#endif /* H5_HAVE_FILTER_SZIP */ - -/**************************************************************** -** -** test_misc1(): test unlinking a dataset from a group and immediately -** re-using the dataset name -** -****************************************************************/ -static void -test_misc1(void) -{ - int i; - int i_check; - hid_t file, dataspace, dataset; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Unlinking Dataset and Re-creating It\n")); - - file = H5Fcreate(MISC1_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - dataspace = H5Screate(H5S_SCALAR); - CHECK(dataspace, FAIL, "H5Screate"); - - /* Write the dataset the first time. */ - dataset = - H5Dcreate2(file, MISC1_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - i = MISC1_VAL; - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Remove the dataset. 
*/ - ret = H5Ldelete(file, MISC1_DSET_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Write the dataset for the second time with a different value. */ - dataset = - H5Dcreate2(file, MISC1_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - i = MISC1_VAL2; - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(dataspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Now, check the value written to the dataset, after it was re-created */ - file = H5Fopen(MISC1_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - - dataspace = H5Screate(H5S_SCALAR); - CHECK(dataspace, FAIL, "H5Screate"); - - dataset = H5Dopen2(file, MISC1_DSET_NAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i_check); - CHECK(ret, FAIL, "H5Dread"); - VERIFY(i_check, MISC1_VAL2, "H5Dread"); - - ret = H5Sclose(dataspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_misc1() */ - -static hid_t -misc2_create_type(void) -{ - hid_t type, type_tmp; - herr_t ret; - - type_tmp = H5Tcopy(H5T_C_S1); - CHECK(type_tmp, FAIL, "H5Tcopy"); - - ret = H5Tset_size(type_tmp, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - type = H5Tcreate(H5T_COMPOUND, sizeof(misc2_struct)); - CHECK(type, FAIL, "H5Tcreate"); - - ret = H5Tinsert(type, "string", offsetof(misc2_struct, string), type_tmp); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tclose(type_tmp); - CHECK(ret, FAIL, "H5Tclose"); - - return type; -} - -static void -test_misc2_write_attribute(void) -{ - hid_t file1, file2, root1, root2, dataspace, att1, att2; - hid_t type; - herr_t ret; - misc2_struct data, data_check; - char *string_att1 = strdup("string attribute in file one"); - char *string_att2 = strdup("string attribute in file two"); - - memset(&data, 0, sizeof(data)); - memset(&data_check, 0, sizeof(data_check)); - - type = misc2_create_type(); - - dataspace = H5Screate(H5S_SCALAR); - CHECK(dataspace, FAIL, "H5Screate"); - - file2 = H5Fcreate(MISC2_FILE_2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file2, FAIL, "H5Fcreate"); - - file1 = H5Fcreate(MISC2_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1, FAIL, "H5Fcreate"); - - root1 = H5Gopen2(file1, "/", H5P_DEFAULT); - CHECK(root1, FAIL, "H5Gopen2"); - - att1 = H5Acreate2(root1, MISC2_ATT_NAME_1, type, dataspace, H5P_DEFAULT, H5P_DEFAULT); - CHECK(att1, FAIL, "H5Acreate2"); - - data.string = string_att1; - - ret = H5Awrite(att1, type, &data); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Aread(att1, type, &data_check); - CHECK(ret, FAIL, "H5Aread"); - - ret = H5Treclaim(type, dataspace, H5P_DEFAULT, &data_check); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Aclose(att1); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(root1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file1); - CHECK(ret, FAIL, "H5Fclose"); - - root2 = H5Gopen2(file2, "/", H5P_DEFAULT); - CHECK(root2, FAIL, "H5Gopen2"); - - att2 = H5Acreate2(root2, MISC2_ATT_NAME_2, type, dataspace, H5P_DEFAULT, H5P_DEFAULT); - CHECK(att2, FAIL, "H5Acreate2"); - - data.string = string_att2; - - ret = H5Awrite(att2, type, &data); - CHECK(ret, FAIL, "H5Awrite"); - - ret = 
H5Aread(att2, type, &data_check); - CHECK(ret, FAIL, "H5Aread"); - - ret = H5Treclaim(type, dataspace, H5P_DEFAULT, &data_check); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Aclose(att2); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Gclose(root2); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Sclose(dataspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file2); - CHECK(ret, FAIL, "H5Fclose"); - - free(string_att1); - free(string_att2); -} - -static void -test_misc2_read_attribute(const char *filename, const char *att_name) -{ - hid_t file, root, att; - hid_t type; - hid_t space; - herr_t ret; - misc2_struct data_check; - - type = misc2_create_type(); - - file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - - root = H5Gopen2(file, "/", H5P_DEFAULT); - CHECK(root, FAIL, "H5Gopen2"); - - att = H5Aopen(root, att_name, H5P_DEFAULT); - CHECK(att, FAIL, "H5Aopen"); - - space = H5Aget_space(att); - CHECK(space, FAIL, "H5Aget_space"); - - ret = H5Aread(att, type, &data_check); - CHECK(ret, FAIL, "H5Aread"); - - ret = H5Treclaim(type, space, H5P_DEFAULT, &data_check); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Aclose(att); - CHECK(ret, FAIL, "H5Aclose"); - - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Gclose(root); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} -/**************************************************************** -** -** test_misc2(): test using the same VL-derived datatype in two -** different files, which was causing problems with the -** datatype conversion functions -** -****************************************************************/ -static void -test_misc2(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing VL datatype in two different files\n")); - - test_misc2_write_attribute(); - test_misc2_read_attribute(MISC2_FILE_1, MISC2_ATT_NAME_1); - test_misc2_read_attribute(MISC2_FILE_2, MISC2_ATT_NAME_2); -} /* end test_misc2() */ - -/**************************************************************** -** -** test_misc3(): Test reading from chunked dataset with non-zero -** fill value -** -****************************************************************/ -static void -test_misc3(void) -{ - hid_t file, dataspace, dataset, dcpl; - int rank = MISC3_RANK; - hsize_t dims[MISC3_RANK] = {MISC3_DIM1, MISC3_DIM2}; - hsize_t chunk_dims[MISC3_RANK] = {MISC3_CHUNK_DIM1, MISC3_CHUNK_DIM2}; - int fill = MISC3_FILL_VALUE; - int read_buf[MISC3_DIM1][MISC3_DIM2]; - int i, j; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing reading from chunked dataset with non-zero fill-value\n")); - - file = H5Fcreate(MISC3_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create a simple dataspace */ - dataspace = H5Screate_simple(rank, dims, NULL); - CHECK(dataspace, FAIL, "H5Screate_simple"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set the chunk information */ - ret = H5Pset_chunk(dcpl, rank, chunk_dims); - CHECK(dcpl, FAIL, "H5Pset_chunk"); - - /* Set the fill-value information */ - ret = H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill); - CHECK(dcpl, FAIL, "H5Pset_fill_value"); - - /* Create the dataset */ - dataset = H5Dcreate2(file, MISC3_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, 
dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Read from the dataset (should be fill-values) */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &read_buf); - CHECK(ret, FAIL, "H5Dread"); - - for (i = 0; i < MISC3_DIM1; i++) - for (j = 0; j < MISC3_DIM2; j++) - VERIFY(read_buf[i][j], fill, "H5Dread"); - - /* Release resources */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Sclose(dataspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc3() */ - -/**************************************************************** -** -** test_misc4(): Test the that 'fileno' field in H5O_info_t is -** valid. -** -****************************************************************/ -static void -test_misc4(void) -{ - hid_t file1, file2, group1, group2, group3; - H5O_info2_t oinfo1, oinfo2, oinfo3; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing fileno working in H5O_info2_t\n")); - - file1 = H5Fcreate(MISC4_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1, FAIL, "H5Fcreate"); - - /* Create the first group */ - group1 = H5Gcreate2(file1, MISC4_GROUP_1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group1, FAIL, "H5Gcreate2"); - - /* Create the second group */ - group2 = H5Gcreate2(file1, MISC4_GROUP_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group2, FAIL, "H5Gcreate2"); - - file2 = H5Fcreate(MISC4_FILE_2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file2, FAIL, "H5Fcreate"); - - /* Create the first group */ - group3 = H5Gcreate2(file2, MISC4_GROUP_1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group3, FAIL, "H5Gcreate2"); - - /* Get the stat information for each group */ - ret = H5Oget_info_by_name3(file1, MISC4_GROUP_1, &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(file1, MISC4_GROUP_2, &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - ret = H5Oget_info_by_name3(file2, MISC4_GROUP_1, &oinfo3, H5O_INFO_BASIC, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name3"); - - /* Verify that the fileno values are the same for groups from file1 */ - VERIFY(oinfo1.fileno, oinfo2.fileno, "H5Oget_info_by_name"); - - /* Verify that the fileno values are not the same between file1 & file2 */ - if (oinfo1.fileno == oinfo3.fileno) - TestErrPrintf("Error on line %d: oinfo1.fileno != oinfo3.fileno\n", __LINE__); - if (oinfo2.fileno == oinfo3.fileno) - TestErrPrintf("Error on line %d: oinfo2.fileno != oinfo3.fileno\n", __LINE__); - - /* Close the objects */ - ret = H5Gclose(group1); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(group2); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Gclose(group3); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file1); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Fclose(file2); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc4() */ - -/**************************************************************** -** -** test_misc5(): Test several level deep nested compound & VL datatypes -** -****************************************************************/ - -/*********************** struct3 ***********************/ - -static misc5_struct3_hndl * -create_struct3(void) -{ - misc5_struct3_hndl *str3hndl; /* New 'struct3' created */ - herr_t ret; /* For error checking */ - - str3hndl = (misc5_struct3_hndl *)malloc(sizeof(misc5_struct3_hndl)); - 
CHECK_PTR(str3hndl, "malloc"); - - str3hndl->st3h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct3)); - CHECK(str3hndl->st3h_base, FAIL, "H5Tcreate"); - - ret = H5Tinsert(str3hndl->st3h_base, "st3_el1", HOFFSET(misc5_struct3, st3_el1), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - str3hndl->st3h_id = H5Tvlen_create(str3hndl->st3h_base); - CHECK(str3hndl->st3h_id, FAIL, "H5Tvlen_create"); - - return str3hndl; -} - -static void -delete_struct3(misc5_struct3_hndl *str3hndl) -{ - herr_t ret; /* For error checking */ - - ret = H5Tclose(str3hndl->st3h_id); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Tclose(str3hndl->st3h_base); - CHECK(ret, FAIL, "H5Tclose"); - - free(str3hndl); -} - -static void -set_struct3(misc5_struct3 *buf) -{ - buf->st3_el1 = MISC5_DBGELVAL3; -} - -/*********************** struct2 ***********************/ - -static misc5_struct2_hndl * -create_struct2(void) -{ - misc5_struct2_hndl *str2hndl; /* New 'struct2' created */ - herr_t ret; /* For error checking */ - - str2hndl = (misc5_struct2_hndl *)malloc(sizeof(misc5_struct2_hndl)); - CHECK_PTR(str2hndl, "malloc"); - - str2hndl->st2h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct2)); - CHECK(str2hndl->st2h_base, FAIL, "H5Tcreate"); - - ret = H5Tinsert(str2hndl->st2h_base, "st2_el1", HOFFSET(misc5_struct2, st2_el1), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - str2hndl->st2h_st3hndl = create_struct3(); - CHECK_PTR(str2hndl->st2h_st3hndl, "create_struct3"); - - ret = H5Tinsert(str2hndl->st2h_base, "st2_el2", HOFFSET(misc5_struct2, st2_el2), - str2hndl->st2h_st3hndl->st3h_id); - CHECK(ret, FAIL, "H5Tinsert"); - - str2hndl->st2h_id = H5Tvlen_create(str2hndl->st2h_base); - CHECK(str2hndl->st2h_id, FAIL, "H5Tvlen_create"); - - return str2hndl; -} - -static void -delete_struct2(misc5_struct2_hndl *str2hndl) -{ - herr_t ret; /* For error checking */ - - ret = H5Tclose(str2hndl->st2h_id); - CHECK(ret, FAIL, "H5Tclose"); - - delete_struct3(str2hndl->st2h_st3hndl); - - H5Tclose(str2hndl->st2h_base); - CHECK(ret, FAIL, "H5Tclose"); - - free(str2hndl); -} - -static void -set_struct2(misc5_struct2 *buf) -{ - unsigned i; /* Local index variable */ - - buf->st2_el1 = MISC5_DBGELVAL2; - buf->st2_el2.len = MISC5_DBGNELM3; - - buf->st2_el2.p = malloc((buf->st2_el2.len) * sizeof(misc5_struct3)); - CHECK_PTR(buf->st2_el2.p, "malloc"); - - for (i = 0; i < (buf->st2_el2.len); i++) - set_struct3(&(((misc5_struct3 *)(buf->st2_el2.p))[i])); -} - -static void -clear_struct2(misc5_struct2 *buf) -{ - free(buf->st2_el2.p); -} - -/*********************** struct1 ***********************/ - -static misc5_struct1_hndl * -create_struct1(void) -{ - misc5_struct1_hndl *str1hndl; /* New 'struct1' created */ - herr_t ret; /* For error checking */ - - str1hndl = (misc5_struct1_hndl *)malloc(sizeof(misc5_struct1_hndl)); - CHECK_PTR(str1hndl, "malloc"); - - str1hndl->st1h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct1)); - CHECK(str1hndl->st1h_base, FAIL, "H5Tcreate"); - - ret = H5Tinsert(str1hndl->st1h_base, "st1_el1", HOFFSET(misc5_struct1, st1_el1), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - str1hndl->st1h_st2hndl = create_struct2(); - CHECK_PTR(str1hndl->st1h_st2hndl, "create_struct2"); - - ret = H5Tinsert(str1hndl->st1h_base, "st1_el2", HOFFSET(misc5_struct1, st1_el2), - str1hndl->st1h_st2hndl->st2h_id); - CHECK(ret, FAIL, "H5Tinsert"); - - str1hndl->st1h_id = H5Tvlen_create(str1hndl->st1h_base); - CHECK(str1hndl->st1h_id, FAIL, "H5Tvlen_create"); - - return str1hndl; -} - -static void -delete_struct1(misc5_struct1_hndl 
*str1hndl) -{ - herr_t ret; /* For error checking */ - - ret = H5Tclose(str1hndl->st1h_id); - CHECK(ret, FAIL, "H5Tclose"); - - delete_struct2(str1hndl->st1h_st2hndl); - - ret = H5Tclose(str1hndl->st1h_base); - CHECK(ret, FAIL, "H5Tclose"); - - free(str1hndl); -} - -static void -set_struct1(misc5_struct1 *buf) -{ - unsigned i; /* Local index variable */ - - buf->st1_el1 = MISC5_DBGELVAL1; - buf->st1_el2.len = MISC5_DBGNELM2; - - buf->st1_el2.p = malloc((buf->st1_el2.len) * sizeof(misc5_struct2)); - CHECK_PTR(buf->st1_el2.p, "malloc"); - - for (i = 0; i < (buf->st1_el2.len); i++) - set_struct2(&(((misc5_struct2 *)(buf->st1_el2.p))[i])); -} - -static void -clear_struct1(misc5_struct1 *buf) -{ - unsigned i; - - for (i = 0; i < buf->st1_el2.len; i++) - clear_struct2(&(((misc5_struct2 *)(buf->st1_el2.p))[i])); - free(buf->st1_el2.p); -} - -static void -test_misc5(void) -{ - hid_t loc_id, space_id, dataset_id; - hid_t mem_type_id; - misc5_struct1_hndl *str1hndl; - hsize_t dims[MISC5_DSETRANK]; - hvl_t buf; - unsigned i, j, k; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing several level deep nested compound & VL datatypes \n")); - - /* Write the dataset out */ - loc_id = H5Fcreate(MISC5_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(loc_id, FAIL, "H5Fcreate"); - - /* Create the memory structure to write */ - str1hndl = create_struct1(); - CHECK_PTR(str1hndl, "create_struct1"); - - /* Create the dataspace */ - dims[0] = MISC5_NELMTOPLVL; - space_id = H5Screate_simple(MISC5_DSETRANK, dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - /* Create the dataset */ - dataset_id = H5Dcreate2(loc_id, MISC5_DSETNAME, str1hndl->st1h_id, space_id, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - /* Create the variable-length buffer */ - buf.len = MISC5_DBGNELM1; - buf.p = malloc((buf.len) * sizeof(misc5_struct1)); - CHECK_PTR(buf.p, "malloc"); - - /* Create the top-level VL information */ - for (i = 0; i < MISC5_DBGNELM1; i++) - set_struct1(&(((misc5_struct1 *)(buf.p))[i])); - - /* Write the data out */ - ret = H5Dwrite(dataset_id, str1hndl->st1h_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Release the top-level VL information */ - for (j = 0; j < MISC5_DBGNELM1; j++) - clear_struct1(&(((misc5_struct1 *)(buf.p))[j])); - - /* Free the variable-length buffer */ - free(buf.p); - - /* Close dataset */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - /* Delete memory structures */ - delete_struct1(str1hndl); - - /* Close file */ - ret = H5Fclose(loc_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Read the dataset back in & verify it */ - loc_id = H5Fopen(MISC5_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(loc_id, FAIL, "H5Fopen"); - - /* Open dataset again */ - dataset_id = H5Dopen2(loc_id, MISC5_DSETNAME, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dopen2"); - - /* Get the dataset's datatype */ - mem_type_id = H5Dget_type(dataset_id); - CHECK(mem_type_id, FAIL, "H5Dget_type"); - - /* Get the dataset's dataspace */ - space_id = H5Dget_space(dataset_id); - CHECK(space_id, FAIL, "H5Dget_space"); - - /* Read the data back in */ - ret = H5Dread(dataset_id, mem_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the correct information was read in */ - for (i = 0; i < (buf.len); i++) { - /* printf("[%d]=%d\n",i, ((misc5_struct1 *)(buf.p))[i].st1_el1); 
*/ - VERIFY(((misc5_struct1 *)(buf.p))[i].st1_el1, MISC5_DBGELVAL1, "H5Dread"); - for (j = 0; j < (((misc5_struct1 *)(buf.p))[i].st1_el2.len); j++) { - /* printf(" [%d]=%d\n",j, ((misc5_struct2 *)(((misc5_struct1 *) - * (buf.p))[i].st1_el2.p))[j].st2_el1); */ - VERIFY(((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j].st2_el1, MISC5_DBGELVAL2, - "H5Dread"); - for (k = 0; k < (((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j].st2_el2.len); - k++) { - /* printf(" [%d]=%d\n",k, ((misc5_struct3 *)(((misc5_struct2 *) (((misc5_struct1 - * *)(buf.p))[i]. st1_el2.p))[j].st2_el2.p))[k].st3_el1); */ - VERIFY(((misc5_struct3 *)(((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j] - .st2_el2.p))[k] - .st3_el1, - MISC5_DBGELVAL3, "H5Dread"); - } /* end for */ - } - } - - /* Reclaim the memory for the VL information */ - ret = H5Treclaim(mem_type_id, space_id, H5P_DEFAULT, &buf); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close dataspace */ - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset */ - ret = H5Tclose(mem_type_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close dataset */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(loc_id); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_misc5() */ - -/**************************************************************** -** -** test_misc6(): Test that object header continuation messages are -** created correctly. -** -****************************************************************/ -static void -test_misc6(void) -{ - hid_t loc_id, space_id, dataset_id; - hid_t attr_id; - char attr_name[16]; - unsigned u; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing object header continuation code \n")); - - /* Create the file */ - loc_id = H5Fcreate(MISC6_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(loc_id, FAIL, "H5Fcreate"); - - /* Create the dataspace */ - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - - /* Create the first dataset */ - dataset_id = - H5Dcreate2(loc_id, MISC6_DSETNAME1, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - /* Close dataset */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create the second dataset */ - dataset_id = - H5Dcreate2(loc_id, MISC6_DSETNAME2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dcreate2"); - - /* Close dataset */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(loc_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Loop through adding attributes to each dataset */ - for (u = 0; u < MISC6_NUMATTR; u++) { - /* Create name for attribute */ - snprintf(attr_name, sizeof(attr_name), "Attr#%u", u); - - /* Open the file */ - loc_id = H5Fopen(MISC6_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(loc_id, FAIL, "H5Fopen"); - - /* Open first dataset */ - dataset_id = H5Dopen2(loc_id, MISC6_DSETNAME1, H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dopen2"); - - /* Add attribute to dataset */ - attr_id = H5Acreate2(dataset_id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open second dataset */ - dataset_id = H5Dopen2(loc_id, MISC6_DSETNAME2, 
H5P_DEFAULT); - CHECK(dataset_id, FAIL, "H5Dopen2"); - - /* Add attribute to dataset */ - attr_id = H5Acreate2(dataset_id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - - /* Close attribute */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dataset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(loc_id); - CHECK(ret, FAIL, "H5Fclose"); - } /* end for */ - - /* Close dataspace */ - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - -} /* end test_misc6() */ - -/**************************************************************** -** -** test_misc7(): Test that datatypes are sensible to store on -** disk. (i.e. not partially initialized) -** -****************************************************************/ -#if 0 -static void -test_misc7(void) -{ - hid_t fid, did, tid, sid; - int enum_value = 1; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing sensible datatype on disk code \n")); - - /* Attempt to commit a non-sensible datatype */ - - /* Create the file */ - fid = H5Fcreate(MISC7_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create the dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create the compound datatype to commit*/ - tid = H5Tcreate(H5T_COMPOUND, (size_t)32); - CHECK(tid, FAIL, "H5Tcreate"); - - /* Attempt to commit an empty compound datatype */ - ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VERIFY(ret, FAIL, "H5Tcommit2"); - - /* Attempt to use empty compound datatype to create dataset */ - did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VERIFY(ret, FAIL, "H5Dcreate2"); - - /* Add a field to the compound datatype */ - ret = H5Tinsert(tid, "a", (size_t)0, H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Attempt to commit the compound datatype now - should work */ - ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Attempt to use compound datatype to create dataset now - should work */ - did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Close dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close compound datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create the enum datatype to commit*/ - tid = H5Tenum_create(H5T_NATIVE_INT); - CHECK(tid, FAIL, "H5Tenum_create"); - - /* Attempt to commit an empty enum datatype */ - ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VERIFY(ret, FAIL, "H5Tcommit2"); - - /* Attempt to use empty enum datatype to create dataset */ - did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VERIFY(did, FAIL, "H5Dcreate2"); - - /* Add a member to the enum datatype */ - ret = H5Tenum_insert(tid, "a", &enum_value); - CHECK(ret, FAIL, "H5Tenum_insert"); - - /* Attempt to commit the enum datatype now - should work */ - ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Attempt to use enum datatype to create dataset now - should work */ - did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Close 
dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close enum datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_misc7() */ -#endif - -/**************************************************************** -** -** test_misc8(): Test storage size of various types of dataset -** storage methods. -** -****************************************************************/ -#if 0 -static void -test_misc8(void) -{ - hid_t fid, did, sid; - hid_t fapl; /* File access property list */ - hid_t dcpl; /* Dataset creation property list */ - int rank = MISC8_RANK; - hsize_t dims[MISC8_RANK] = {MISC8_DIM0, MISC8_DIM1}; - hsize_t chunk_dims[MISC8_RANK] = {MISC8_CHUNK_DIM0, MISC8_CHUNK_DIM1}; - hsize_t storage_size; /* Number of bytes of raw data storage used */ - int *wdata; /* Data to write */ - int *tdata; /* Temporary pointer to data write */ -#ifdef VERIFY_DATA - int *rdata; /* Data to read */ - int *tdata2; /* Temporary pointer to data to read */ -#endif /* VERIFY_DATA */ - unsigned u, v; /* Local index variables */ - int mdc_nelmts; /* Metadata number of elements */ - size_t rdcc_nelmts; /* Raw data number of elements */ - size_t rdcc_nbytes; /* Raw data number of bytes */ - double rdcc_w0; /* Raw data write percentage */ - hsize_t start[MISC8_RANK]; /* Hyperslab start */ - hsize_t count[MISC8_RANK]; /* Hyperslab block count */ - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing dataset storage sizes\n")); - - /* Allocate space for the data to write & read */ - wdata = (int *)malloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); - CHECK_PTR(wdata, "malloc"); -#ifdef VERIFY_DATA - rdata = (int *)malloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); - CHECK_PTR(rdata, "malloc"); -#endif /* VERIFY_DATA */ - - /* Initialize values */ - tdata = wdata; - for (u = 0; u < MISC8_DIM0; u++) - for (v = 0; v < MISC8_DIM1; v++) - *tdata++ = (int)(((u * MISC8_DIM1) + v) % 13); - - /* Create a file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Get the default file access properties for caching */ - ret = H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); - CHECK(ret, FAIL, "H5Pget_cache"); - - /* Decrease the size of the raw data cache */ - rdcc_nbytes = 0; - - /* Set the file access properties for caching */ - ret = H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); - CHECK(ret, FAIL, "H5Pset_cache"); - - /* Create the file */ - fid = H5Fcreate(MISC8_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create a simple dataspace */ - sid = H5Screate_simple(rank, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Select a hyperslab which coincides with chunk boundaries */ - /* (For later use) */ - start[0] = 1; - start[1] = 1; - count[0] = (MISC8_CHUNK_DIM0 * 2) - 1; - count[1] = (MISC8_CHUNK_DIM1 * 2) - 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* I. 
contiguous dataset tests */ - - ret = H5Pset_layout(dcpl, H5D_CONTIGUOUS); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a contiguous dataset, with space allocation early */ - did = H5Dcreate2(fid, MISC8_DSETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - -#ifndef H5_HAVE_PARALLEL - /* Set the space allocation time to late */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a contiguous dataset, with space allocation late */ - did = H5Dcreate2(fid, MISC8_DSETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size before data is written */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, 0, "H5Dget_storage_size"); - - /* Write data */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Set the space allocation time to incremental */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a contiguous dataset, with space allocation late */ - did = H5Dcreate2(fid, MISC8_DSETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size before data is written */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, 0, "H5Dget_storage_size"); - - /* Write data */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); -#endif /* H5_HAVE_PARALLEL */ - - /* II. 
compact dataset tests */ - ret = H5Pset_layout(dcpl, H5D_COMPACT); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Set the space allocation time to late */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a contiguous dataset, with space allocation late */ - /* Should fail */ - H5E_BEGIN_TRY - { - did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(did, FAIL, "H5Dcreate2"); - - /* Set the space allocation time to incremental */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a contiguous dataset, with space allocation incremental */ - /* Should fail */ - H5E_BEGIN_TRY - { - did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(did, FAIL, "H5Dcreate2"); - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Set the fill time to allocation */ - ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a contiguous dataset, with space allocation early */ - did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* III. chunked dataset tests */ - - ret = H5Pset_layout(dcpl, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Use chunked storage for this dataset */ - ret = H5Pset_chunk(dcpl, rank, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create a chunked dataset, with space allocation early */ - did = H5Dcreate2(fid, MISC8_DSETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - -#ifndef H5_HAVE_PARALLEL - /* Set the space allocation time to late */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Use chunked storage for this dataset */ - ret = H5Pset_chunk(dcpl, rank, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create a chunked dataset, with space allocation late */ - did = H5Dcreate2(fid, MISC8_DSETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size after dataset is created */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, 0, "H5Dget_storage_size"); - - /* Write part of the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, 
"H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Set the space allocation time to incremental */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a chunked dataset, with space allocation incremental */ - did = H5Dcreate2(fid, MISC8_DSETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size before data is written */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, 0, "H5Dget_storage_size"); - - /* Write part of the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after only four chunks are written */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, (hsize_t)(4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Write entire dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - -#ifdef VERIFY_DATA - /* Read data */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check values written */ - tdata = wdata; - tdata2 = rdata; - for (u = 0; u < MISC8_DIM0; u++) - for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++) - if (*tdata != *tdata2) - TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, - (unsigned)v, (int)*tdata, (int)*tdata2); -#endif /* VERIFY_DATA */ - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); - VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5Dget_storage_size"); - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); -#endif /* H5_HAVE_PARALLEL */ - - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Use compression as well as chunking for these datasets */ -#ifdef H5_HAVE_FILTER_DEFLATE - ret = H5Pset_deflate(dcpl, 9); - CHECK(ret, FAIL, "H5Pset_deflate"); -#endif /* end H5_HAVE_FILTER_DEFLATE */ - - /* Create a chunked dataset, with space allocation early */ - did = H5Dcreate2(fid, MISC8_DSETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Write part of the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); -#ifdef H5_HAVE_FILTER_DEFLATE - if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#else /* Compression is not configured */ - if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: wrong storage size! 
storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#endif /* H5_HAVE_FILTER_DEFLATE */ - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - -#ifndef H5_HAVE_PARALLEL - /* Set the space allocation time to late */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a chunked dataset, with space allocation late */ - did = H5Dcreate2(fid, MISC8_DSETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size before data is written */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, 0, "H5Dget_storage_size"); - - /* Write part of the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after only four chunks are written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); -#ifdef H5_HAVE_FILTER_DEFLATE - if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#else /* Compression is not configured */ - if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#endif /* H5_HAVE_FILTER_DEFLATE */ - - /* Write entire dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - -#ifdef VERIFY_DATA - /* Read data */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check values written */ - tdata = wdata; - tdata2 = rdata; - for (u = 0; u < MISC8_DIM0; u++) - for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++) - if (*tdata != *tdata2) - TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, - (unsigned)v, (int)*tdata, (int)*tdata2); -#endif /* VERIFY_DATA */ - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); -#ifdef H5_HAVE_FILTER_DEFLATE - if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#else - if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: wrong storage size! 
storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#endif /*H5_HAVE_FILTER_DEFLATE*/ - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Set the space allocation time to incremental */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create a chunked dataset, with space allocation incremental */ - did = H5Dcreate2(fid, MISC8_DSETNAME10, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the storage size before data is written */ - storage_size = H5Dget_storage_size(did); - VERIFY(storage_size, 0, "H5Dget_storage_size"); - - /* Write part of the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check the storage size after only four chunks are written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); -#ifdef H5_HAVE_FILTER_DEFLATE - if (storage_size >= (4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#else /* Compression is not configured */ - if (storage_size != (4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#endif /* H5_HAVE_FILTER_DEFLATE */ - - /* Write entire dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - -#ifdef VERIFY_DATA - /* Read data */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check values written */ - tdata = wdata; - tdata2 = rdata; - for (u = 0; u < MISC8_DIM0; u++) - for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++) - if (*tdata != *tdata2) - TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, - (unsigned)v, (int)*tdata, (int)*tdata2); -#endif /* VERIFY_DATA */ - - /* Check the storage size after data is written */ - storage_size = H5Dget_storage_size(did); - CHECK(storage_size, 0, "H5Dget_storage_size"); -#ifdef H5_HAVE_FILTER_DEFLATE - if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#else - if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT))) - TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__, - (unsigned)storage_size); -#endif /*H5_HAVE_FILTER_DEFLATE*/ - - /* Close dataset ID */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); -#endif /* H5_HAVE_PARALLEL */ - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free the read & write buffers */ - free(wdata); -#ifdef VERIFY_DATA - free(rdata); -#endif /* VERIFY_DATA */ -} /* end test_misc8() */ -#endif - -/**************************************************************** -** -** test_misc9(): Test that H5Fopen() does not succeed for core -** files, H5Fcreate() must be used to open them. 
-**
-****************************************************************/
-static void
-test_misc9(void)
-{
-    hid_t  fapl, fid;
-    herr_t ret;
-
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing core file opening\n"));
-
-    fapl = H5Pcreate(H5P_FILE_ACCESS);
-    CHECK(fapl, FAIL, "H5Pcreate");
-
-    ret = H5Pset_fapl_core(fapl, (size_t)1024, 0);
-    CHECK(ret, FAIL, "H5Pset_fapl_core");
-
-    H5E_BEGIN_TRY
-    {
-        fid = H5Fopen(MISC9_FILE, H5F_ACC_RDWR, fapl);
-    }
-    H5E_END_TRY
-    VERIFY(fid, FAIL, "H5Fopen");
-
-    ret = H5Pclose(fapl);
-    CHECK(ret, FAIL, "H5Pclose");
-} /* end test_misc9() */
-
-/****************************************************************
-**
-** test_misc10(): Test opening a dataset created with an older
-** version of the library (shares the tmtimeo.h5 file with the mtime.c
-** test - see notes in gen_old_mtime.c for notes on generating this
-** data file) and using the dataset creation property list from
-** that dataset to create a dataset with the current version of
-** the library. Also tests using file creation property in same way.
-**
-****************************************************************/
-#if 0
-static void
-test_misc10(void)
-{
-    hid_t       file, file_new;       /* File IDs for old & new files */
-    hid_t       fcpl;                 /* File creation property list */
-    hid_t       dataset, dataset_new; /* Dataset IDs for old & new datasets */
-    hid_t       dcpl;                 /* Dataset creation property list */
-    hid_t       space, type;          /* Old dataset's dataspace & datatype */
-    const char *testfile = H5_get_srcdir_filename(MISC10_FILE_OLD); /* Corrected test file name */
-    bool        driver_is_default_compatible;
-    herr_t      ret;
-
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing using old dataset creation property list\n"));
-
-    ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
-    CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
-
-    if (!driver_is_default_compatible) {
-        printf("-- SKIPPED --\n");
-        return;
-    }
-
-    /*
-     * Open the old file and the dataset and get old settings.
- */ - file = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - fcpl = H5Fget_create_plist(file); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); - - dataset = H5Dopen2(file, MISC10_DSETNAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - dcpl = H5Dget_create_plist(dataset); - CHECK(dcpl, FAIL, "H5Dget_create_plist"); - space = H5Dget_space(dataset); - CHECK(space, FAIL, "H5Dget_space"); - type = H5Dget_type(dataset); - CHECK(type, FAIL, "H5Dget_type"); - - /* Create new file & dataset */ - file_new = H5Fcreate(MISC10_FILE_NEW, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); - CHECK(file_new, FAIL, "H5Fcreate"); - - dataset_new = H5Dcreate2(file_new, MISC10_DSETNAME, type, space, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset_new, FAIL, "H5Dcreate2"); - - /* Close new dataset & file */ - ret = H5Dclose(dataset_new); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(file_new); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close old dataset information */ - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close old file information */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); -} /* end test_misc10() */ -#endif - -/**************************************************************** -** -** test_misc11(): Test that all properties in a file creation property -** list are stored correctly in the file and can be retrieved -** when the file is re-opened. -** -****************************************************************/ -static void -test_misc11(void) -{ - hid_t file; /* File IDs for old & new files */ - hid_t fcpl; /* File creation property list */ - hsize_t userblock; /* Userblock size retrieved from FCPL */ - size_t off_size; /* Size of offsets in the file */ - size_t len_size; /* Size of lengths in the file */ - unsigned sym_ik; /* Symbol table B-tree initial 'K' value */ - unsigned istore_ik; /* Indexed storage B-tree initial 'K' value */ - unsigned sym_lk; /* Symbol table B-tree leaf 'K' value */ - unsigned nindexes; /* Shared message number of indexes */ -#if 0 - H5F_info2_t finfo; /* global information about file */ -#endif - H5F_fspace_strategy_t strategy; /* File space strategy */ - hsize_t threshold; /* Free-space section threshold */ - bool persist; /* To persist free-space or not */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing file creation properties retrieved correctly\n")); - - /* Creating a file with the default file creation property list should - * create a version 0 superblock - */ - - /* Create file with default file creation property list */ - file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); -#if 0 - /* Get the file's version information */ - ret = H5Fget_info2(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info2"); - VERIFY(finfo.super.version, 0, "H5Fget_info2"); - VERIFY(finfo.free.version, 0, "H5Fget_info2"); - VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); -#endif - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create a file creation property list */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); - - /* Set all the properties in the FCPL */ - ret = H5Pset_userblock(fcpl, (hsize_t)MISC11_USERBLOCK); - CHECK(ret, FAIL, 
"H5Pset_userblock"); - - ret = H5Pset_sizes(fcpl, (size_t)MISC11_SIZEOF_OFF, (size_t)MISC11_SIZEOF_LEN); - CHECK(ret, FAIL, "H5Pset_sizes"); - - /* This should fail as (32770*2) will exceed ^16 - 2 bytes for storing btree entries */ - H5E_BEGIN_TRY - { - ret = H5Pset_sym_k(fcpl, 32770, 0); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_sym_k"); - - ret = H5Pset_sym_k(fcpl, MISC11_SYM_IK, MISC11_SYM_LK); - CHECK(ret, FAIL, "H5Pset_sym_k"); - - /* This should fail as (32770*2) will exceed ^16 - 2 bytes for storing btree entries */ - H5E_BEGIN_TRY - { - ret = H5Pset_istore_k(fcpl, 32770); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pset_istore_k"); - - ret = H5Pset_istore_k(fcpl, MISC11_ISTORE_IK); - CHECK(ret, FAIL, "H5Pset_istore_k"); - - ret = H5Pset_shared_mesg_nindexes(fcpl, MISC11_NINDEXES); - CHECK(ret, FAIL, "H5Pset_shared_mesg"); - - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5Pset_file_space"); - - /* Creating a file with the non-default file creation property list should - * create a version 2 superblock - */ - - /* Create file with custom file creation property list */ - file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Close FCPL */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); -#if 0 - /* Get the file's version information */ - ret = H5Fget_info2(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info2"); - VERIFY(finfo.super.version, 2, "H5Fget_info2"); - VERIFY(finfo.free.version, 0, "H5Fget_info2"); - VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); -#endif - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - file = H5Fopen(MISC11_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Get the file's creation property list */ - fcpl = H5Fget_create_plist(file); - CHECK(fcpl, FAIL, "H5Fget_create_plist"); -#if 0 - /* Get the file's version information */ - ret = H5Fget_info2(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info2"); - VERIFY(finfo.super.version, 2, "H5Fget_info2"); - VERIFY(finfo.free.version, 0, "H5Fget_info2"); - VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); -#endif - /* Retrieve all the property values & check them */ - ret = H5Pget_userblock(fcpl, &userblock); - CHECK(ret, FAIL, "H5Pget_userblock"); - VERIFY(userblock, MISC11_USERBLOCK, "H5Pget_userblock"); - - ret = H5Pget_sizes(fcpl, &off_size, &len_size); - CHECK(ret, FAIL, "H5Pget_sizes"); - VERIFY(off_size, MISC11_SIZEOF_OFF, "H5Pget_sizes"); - VERIFY(len_size, MISC11_SIZEOF_LEN, "H5Pget_sizes"); - - ret = H5Pget_sym_k(fcpl, &sym_ik, &sym_lk); - CHECK(ret, FAIL, "H5Pget_sym_k"); - VERIFY(sym_ik, MISC11_SYM_IK, "H5Pget_sym_k"); - VERIFY(sym_lk, MISC11_SYM_LK, "H5Pget_sym_k"); - - ret = H5Pget_istore_k(fcpl, &istore_ik); - CHECK(ret, FAIL, "H5Pget_istore_k"); - VERIFY(istore_ik, MISC11_ISTORE_IK, "H5Pget_istore_k"); - - ret = H5Pget_shared_mesg_nindexes(fcpl, &nindexes); - CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes"); - VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes"); - - ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - VERIFY(strategy, 3, "H5Pget_file_space_strategy"); - VERIFY(persist, false, "H5Pget_file_space_strategy"); - VERIFY(threshold, 1, "H5Pget_file_space_strategy"); - - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close FCPL */ - ret = H5Pclose(fcpl); - CHECK(ret, FAIL, "H5Pclose"); -} 
/* end test_misc11() */
-
-/****************************************************************
-**
-** test_misc12(): Test that VL-types operate correctly in chunked
-** datasets that are extended.
-**
-****************************************************************/
-static void
-test_misc12(void)
-{
-    const char *wdata[MISC12_SPACE1_DIM1] = {
-        "Four score and seven years ago our forefathers brought forth on this continent a new nation,",
-        "conceived in liberty and dedicated to the proposition that all men are created equal.",
-        "Now we are engaged in a great civil war,",
-        "testing whether that nation or any nation so conceived and so dedicated can long endure."};
-    const char *wdata1[MISC12_APPEND_SIZE] = {
-        "O Gloria inmarcesible! O Jubilo inmortal! En surcos de dolores, el",
-        "bien germina ya! Ceso la horrible noche, La libertad sublime",
-        "derrama las auroras de su invencible luz.", "La humanidad entera, que entre cadenas gime, comprende",
-        "las palabras del que murio en la cruz."};
-    char   *rdata[MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE]; /* Information read in */
-    hid_t   fid1;
-    hid_t   dataset;
-    hid_t   sid1, space, memspace;
-    hid_t   tid1, cparms;
-    hsize_t dims1[]     = {MISC12_SPACE1_DIM1};
-    hsize_t dimsn[]     = {MISC12_APPEND_SIZE};
-    hsize_t maxdims1[1] = {H5S_UNLIMITED};
-    hsize_t chkdims1[1] = {MISC12_CHUNK_SIZE};
-    hsize_t newsize[1]  = {MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE};
-    hsize_t offset[1]   = {MISC12_SPACE1_DIM1};
-    hsize_t count[1]    = {MISC12_APPEND_SIZE};
-    int     i;   /* counting variable */
-    herr_t  ret; /* Generic return value */
-
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing VL-type in chunked dataset\n"));
-
-    /* This test requires a relatively "fresh" library environment */
-    ret = H5garbage_collect();
-    CHECK(ret, FAIL, "H5garbage_collect");
-
-    /* Create file */
-    fid1 = H5Fcreate(MISC12_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-    CHECK(fid1, FAIL, "H5Fcreate");
-
-    /* Create dataspace for datasets */
-    sid1 = H5Screate_simple(MISC12_SPACE1_RANK, dims1, maxdims1);
-    CHECK(sid1, FAIL, "H5Screate_simple");
-
-    /* Create a datatype to refer to */
-    tid1 = H5Tcopy(H5T_C_S1);
-    CHECK(tid1, FAIL, "H5Tcopy");
-
-    ret = H5Tset_size(tid1, H5T_VARIABLE);
-    CHECK(ret, FAIL, "H5Tset_size");
-
-    cparms = H5Pcreate(H5P_DATASET_CREATE);
-    CHECK(cparms, FAIL, "H5Pcreate");
-
-    ret = H5Pset_chunk(cparms, 1, chkdims1);
-    CHECK(ret, FAIL, "H5Pset_chunk");
-
-    /* Create a dataset */
-    dataset = H5Dcreate2(fid1, MISC12_DSET_NAME, tid1, sid1, H5P_DEFAULT, cparms, H5P_DEFAULT);
-    CHECK(dataset, FAIL, "H5Dcreate2");
-
-    /* Write dataset to disk */
-    ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
-    CHECK(ret, FAIL, "H5Dwrite");
-
-    /* Extend dataset */
-    ret = H5Dset_extent(dataset, newsize);
-    CHECK(ret, FAIL, "H5Dset_extent");
-
-    memspace = H5Screate_simple(MISC12_SPACE1_RANK, dimsn, NULL);
-    CHECK(memspace, FAIL, "H5Screate_simple");
-
-    space = H5Dget_space(dataset);
-    CHECK(space, FAIL, "H5Dget_space");
-
-    ret = H5Sselect_hyperslab(space, H5S_SELECT_SET, offset, NULL, count, NULL);
-    CHECK(ret, FAIL, "H5Sselect_hyperslab");
-
-    /* Write data to new portion of dataset */
-    ret = H5Dwrite(dataset, tid1, memspace, space, H5P_DEFAULT, wdata1);
-    CHECK(ret, FAIL, "H5Dwrite");
-
-    /* Read all data back */
-    ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
-    CHECK(ret, FAIL, "H5Dread");
-
-    for (i = 0; i < MISC12_SPACE1_DIM1; i++)
-        if (strcmp(wdata[i], rdata[i]) != 0)
-            TestErrPrintf("Error on line %d: wdata[%d]=%s,
rdata[%d]=%s\n", __LINE__, i, wdata[i], i, - rdata[i]); - for (; i < (MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE); i++) - if (strcmp(wdata1[i - MISC12_SPACE1_DIM1], rdata[i]) != 0) - TestErrPrintf("Error on line %d: wdata1[%d]=%s, rdata[%d]=%s\n", __LINE__, i - MISC12_SPACE1_DIM1, - wdata1[i - MISC12_SPACE1_DIM1], i, rdata[i]); - - ret = H5Sselect_all(space); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Reclaim VL data memory */ - ret = H5Treclaim(tid1, space, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Everything */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(memspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(cparms); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc12() */ -#if 0 -/* Various routines for misc. 13 test */ -static void -misc13_init_data(unsigned *original_data) -{ - unsigned u; - - for (u = 0; u < MISC13_DIM1; u++) - original_data[u] = u; -} - -static bool -misc13_verify_data_match(const unsigned *original_data, const unsigned *read_data) -{ - unsigned u; - - for (u = 0; u < MISC13_DIM1; u++) - if (original_data[u] != read_data[u]) - return false; - - return true; -} - -static void -misc13_create_dataset(hid_t loc_id, const char *name, hid_t dcpl, const unsigned *data) -{ - hid_t dsid = -1; /* Dataset ID */ - hid_t sid = -1; /* Dataspace ID */ - hsize_t dims[MISC13_RANK]; /* Dataset dimensions */ - herr_t ret; /* Generic return value */ - - /* Create dataspace for use with dataset */ - dims[0] = MISC13_DIM1; - sid = H5Screate_simple(MISC13_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create contiguous dataset in root group */ - dsid = H5Dcreate2(loc_id, name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dcreate2"); - - /* Write some data to dataset */ - ret = H5Dwrite(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the contiguous dataset */ - ret = H5Dclose(dsid); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* end misc13_create_dataset() */ - -static void -misc13_verify_dataset(hid_t loc_id, const char *name, const unsigned *data) -{ - unsigned *read_data = NULL; /* Data to write to dataset */ - hid_t dsid = -1; /* Dataset ID */ - herr_t ret; /* Generic return value */ - - /* Create a data buffer for the dataset read */ - read_data = (unsigned *)calloc(MISC13_DIM1, sizeof(unsigned)); - CHECK_PTR(read_data, "calloc"); - - /* Open the contiguous dataset in the root group */ - dsid = H5Dopen2(loc_id, name, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dopen2"); - - /* Read the data */ - ret = H5Dread(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_data); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify that the data are correct */ - ret = misc13_verify_data_match(data, read_data); - CHECK(ret, FAIL, "misc13_verify_data_match"); - - /* Close the contiguous dataset */ - ret = H5Dclose(dsid); - CHECK(ret, FAIL, "H5Dclose"); - - /* Free the dataset read buffer */ - free(read_data); - -} /* end misc13_verify_dataset() */ - -static void -misc13_create_hdf_file(const char *name, const unsigned *data) -{ - hid_t fid = -1; /* File ID */ - hid_t gid1 = -1; /* Group ID (level 1) */ - hid_t gid2 = -1; /* Group ID (level 
2) */ - hid_t tid = -1; /* Datatype ID */ - hid_t dcplid = -1; /* Dataset creation property list ID */ - hsize_t chunk_dims[MISC13_RANK]; /* Chunk dimensions */ - herr_t ret; /* Generic return value */ - - /* Create file */ - fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create DCPL for use with datasets */ - dcplid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcplid, FAIL, "H5Pcreate"); - - /* Set the DCPL to be chunked */ - ret = H5Pset_layout(dcplid, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Use chunked storage for this DCPL */ - chunk_dims[0] = MISC13_CHUNK_DIM1; - ret = H5Pset_chunk(dcplid, MISC13_RANK, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create contiguous dataset in root group */ - misc13_create_dataset(fid, MISC13_DSET1_NAME, H5P_DEFAULT, data); - - /* Create chunked dataset in root group */ - misc13_create_dataset(fid, MISC13_DSET2_NAME, dcplid, data); - - /* Create a datatype to commit to the file */ - tid = H5Tcopy(H5T_NATIVE_INT); - CHECK(tid, FAIL, "H5Tcopy"); - - /* Create a named datatype in the root group */ - ret = H5Tcommit2(fid, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close named datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Create a group in the root group */ - gid1 = H5Gcreate2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gcreate2"); - - /* Create another group in the new group */ - gid2 = H5Gcreate2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gcreate2"); - - /* Close the second group */ - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create contiguous dataset in new group */ - misc13_create_dataset(gid1, MISC13_DSET1_NAME, H5P_DEFAULT, data); - - /* Create chunked dataset in new group */ - misc13_create_dataset(gid1, MISC13_DSET2_NAME, dcplid, data); - - /* Create a datatype to commit to the new group */ - tid = H5Tcopy(H5T_NATIVE_INT); - CHECK(tid, FAIL, "H5Tcopy"); - - /* Create a named datatype in the new group */ - ret = H5Tcommit2(gid1, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close named datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close the first group */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the DCPL */ - ret = H5Pclose(dcplid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end misc13_create_hdf_file() */ - -static void -misc13_insert_user_block(const char *old_name, const char *new_name, const char *str, size_t size) -{ - FILE *new_fp = NULL; /* Pointers to new & old files */ - FILE *old_fp = NULL; - void *user_block = NULL; /* Pointer to user block to write to file */ - void *copy_buf = NULL; /* Pointer to buffer for copying data */ - size_t written; /* Amount of data written to new file */ - size_t read_in; /* Amount of data read in from old file */ - int ret; /* Generic status value */ - - /* Allocate space for the user block */ - user_block = calloc(size, (size_t)1); - CHECK_PTR(user_block, "calloc"); - - /* Copy in the user block data */ - memcpy(user_block, str, strlen(str)); - - /* Open the new file */ - new_fp = fopen(new_name, "wb"); - CHECK_PTR(new_fp, "fopen"); - - /* Write the user block to the new file */ - written = fwrite(user_block, (size_t)1, size, new_fp); - 
VERIFY(written, size, "fwrite"); - - /* Open the old file */ - old_fp = fopen(old_name, "rb"); - CHECK_PTR(old_fp, "fopen"); - - /* Allocate space for the copy buffer */ - copy_buf = malloc((size_t)MISC13_COPY_BUF_SIZE); - CHECK_PTR(copy_buf, "malloc"); - - /* Copy data from the old file to the new file */ - while ((read_in = fread(copy_buf, (size_t)1, (size_t)MISC13_COPY_BUF_SIZE, old_fp)) > 0) { - /* Write the data to the new file */ - written = fwrite(copy_buf, (size_t)1, read_in, new_fp); - VERIFY(written, read_in, "fwrite"); - } - - /* Close the old file */ - ret = fclose(old_fp); - VERIFY(ret, 0, "fclose"); - - /* Close the new file */ - ret = fclose(new_fp); - VERIFY(ret, 0, "fclose"); - - /* Free the copy buffer */ - free(copy_buf); - - /* Free the user block */ - free(user_block); - -} /* end misc13_insert_user_block() */ - -static void -misc13_verify_file(const char *name, const unsigned *data, hsize_t userblock_size, - bool check_for_new_dataset) -{ - hid_t fid = -1; /* File ID */ - hid_t gid1 = -1; /* Group IDs */ - hid_t gid2 = -1; /* Group IDs */ - hid_t tid = -1; /* Datatype ID */ - hid_t fcplid = -1; /* File creation property list ID */ - hsize_t ub_size_out; /* Userblock size retrieved from FCPL */ - herr_t ret; /* Generic return value */ - - /* Open the file */ - fid = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Get the file's FCPL */ - fcplid = H5Fget_create_plist(fid); - CHECK(fcplid, FAIL, "H5Fget_create_plist"); - - /* Get the user block size for the file */ - ret = H5Pget_userblock(fcplid, &ub_size_out); - CHECK(ret, FAIL, "H5Pget_userblock"); - - /* Check the userblock size */ - VERIFY(userblock_size, ub_size_out, "H5Pget_userblock"); - - /* Close the FCPL */ - ret = H5Pclose(fcplid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Verify the contiguous dataset in the root group */ - misc13_verify_dataset(fid, MISC13_DSET1_NAME, data); - - /* Verify the chunked dataset in the root group */ - misc13_verify_dataset(fid, MISC13_DSET2_NAME, data); - - /* Verify the "new" contiguous dataset in the root group, if asked */ - if (check_for_new_dataset) - misc13_verify_dataset(fid, MISC13_DSET3_NAME, data); - - /* Open the named datatype in the root group */ - tid = H5Topen2(fid, MISC13_DTYPE_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - - /* Verify the type is correct */ - VERIFY(H5Tequal(tid, H5T_NATIVE_INT), true, "H5Tequal"); - - /* Close named datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open the first group */ - gid1 = H5Gopen2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid1, FAIL, "H5Gopen2"); - - /* Verify the contiguous dataset in the first group */ - misc13_verify_dataset(gid1, MISC13_DSET1_NAME, data); - - /* Verify the chunked dataset in the first group */ - misc13_verify_dataset(gid1, MISC13_DSET2_NAME, data); - - /* Open the named datatype in the first group */ - tid = H5Topen2(gid1, MISC13_DTYPE_NAME, H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - - /* Verify the type is correct */ - VERIFY(H5Tequal(tid, H5T_NATIVE_INT), true, "H5Tequal"); - - /* Close named datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open the second group */ - gid2 = H5Gopen2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gopen2"); - - /* Close the second group */ - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the first group */ - ret = H5Gclose(gid1); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, 
"H5Fclose"); - -} /* end misc13_verify_file() */ - -static void -misc13_add_to_new_file(const char *name, const unsigned *data) -{ - hid_t fid = -1; /* File ID */ - herr_t ret; /* Generic return value */ - - /* Open the file */ - fid = H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create new contiguous dataset in root group */ - misc13_create_dataset(fid, MISC13_DSET3_NAME, H5P_DEFAULT, data); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end misc13_add_to_new_file() */ - -/**************************************************************** -** -** test_misc13(): Test that file contents can be "slid down" by -** inserting a user block in front of an existing file. -** -****************************************************************/ -static void -test_misc13(void) -{ - unsigned *data = NULL; /* Data to write to dataset */ - hsize_t userblock_size; /* Correct size of userblock */ - bool check_for_new_dataset; /* Whether to check for the post-userblock-creation dataset */ - - /* Create a data buffer for the datasets */ - data = (unsigned *)calloc(MISC13_DIM1, sizeof(unsigned)); - CHECK_PTR(data, "calloc"); - - /* Initialize data to write */ - misc13_init_data(data); - - /* Create first file, with no user block */ - misc13_create_hdf_file(MISC13_FILE_1, data); - - /* Verify file contents are correct */ - userblock_size = 0; - check_for_new_dataset = false; - misc13_verify_file(MISC13_FILE_1, data, userblock_size, check_for_new_dataset); - - /* Create a new file by inserting a user block in front of the first file */ - misc13_insert_user_block(MISC13_FILE_1, MISC13_FILE_2, "Test String", (size_t)MISC13_USERBLOCK_SIZE); - - /* Verify file contents are still correct */ - userblock_size = MISC13_USERBLOCK_SIZE; - check_for_new_dataset = false; - misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset); - - /* Make certain we can modify the new file */ - misc13_add_to_new_file(MISC13_FILE_2, data); - - /* Verify file contents are still correct */ - userblock_size = MISC13_USERBLOCK_SIZE; - check_for_new_dataset = true; - misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset); - - /* Free the dataset buffer */ - free(data); - -} /* end test_misc13() */ -#endif - -/**************************************************************** -** -** test_misc14(): Test that file contents can be "slid down" by -** inserting a user block in front of an existing file. 
-** -****************************************************************/ -static void -test_misc14(void) -{ - hid_t file_id; /* File ID */ - hid_t fapl; /* File access property list ID */ - hid_t DataSpace; /* Dataspace ID */ - hid_t Dataset1; /* Dataset ID #1 */ - hid_t Dataset2; /* Dataset ID #2 */ - hid_t Dataset3; /* Dataset ID #3 */ - double data1 = 5.0; /* Data to write for dataset #1 */ - double data2 = 10.0; /* Data to write for dataset #2 */ - double data3 = 15.0; /* Data to write for dataset #3 */ - double rdata; /* Data read in */ - herr_t ret; /* Generic return value */ - - /* Test creating two datasets and deleting the second */ - - /* Increase the metadata block size */ - /* (This makes certain that all the data blocks are allocated together) */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - ret = H5Pset_meta_block_size(fapl, (hsize_t)MISC14_METADATA_SIZE); - CHECK(ret, FAIL, "H5Pset_meta_block_size"); - - /* Create dataspace to use */ - DataSpace = H5Screate(H5S_SCALAR); - CHECK(DataSpace, FAIL, "H5Screate"); - - /* Open the file */ - file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Create first dataset & write data */ - Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(Dataset1, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Create second dataset (to be unlinked). */ - Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(Dataset2, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Check data from first dataset */ - ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); - CHECK(ret, FAIL, "H5Dread"); - if (!H5_DBL_ABS_EQUAL(rdata, data1)) - TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__); - - /* Unlink second dataset */ - ret = H5Ldelete(file_id, MISC14_DSET2_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close second dataset */ - ret = H5Dclose(Dataset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Verify the data from dataset #1 */ - ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata); - CHECK(ret, FAIL, "H5Dread"); - if (!H5_DBL_ABS_EQUAL(rdata, data1)) - TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__); - - /* Close first dataset */ - ret = H5Dclose(Dataset1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Test creating two datasets and deleting the first */ - - /* Open the file */ - file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Create first dataset & write data */ - Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(Dataset1, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Create second dataset */ - Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(Dataset2, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, 
H5P_DEFAULT, &data2);
-    CHECK(ret, FAIL, "H5Dwrite");
-
-    /* Check data from second dataset */
-    ret = H5Dread(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
-    CHECK(ret, FAIL, "H5Dread");
-    if (!H5_DBL_ABS_EQUAL(rdata, data2))
-        TestErrPrintf("Error on line %d: data2!=rdata\n", __LINE__);
-
-    /* Unlink first dataset */
-    ret = H5Ldelete(file_id, MISC14_DSET1_NAME, H5P_DEFAULT);
-    CHECK(ret, FAIL, "H5Ldelete");
-
-    /* Close first dataset */
-    ret = H5Dclose(Dataset1);
-    CHECK(ret, FAIL, "H5Dclose");
-
-    /* Verify the data from dataset #2 */
-    ret = H5Dread(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
-    CHECK(ret, FAIL, "H5Dread");
-    if (!H5_DBL_ABS_EQUAL(rdata, data2))
-        TestErrPrintf("Error on line %d: data2!=rdata\n", __LINE__);
-
-    /* Close second dataset */
-    ret = H5Dclose(Dataset2);
-    CHECK(ret, FAIL, "H5Dclose");
-
-    /* Close the file */
-    ret = H5Fclose(file_id);
-    CHECK(ret, FAIL, "H5Fclose");
-
-    /* Test creating three datasets and deleting the second */
-
-    /* Open the file */
-    file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
-    CHECK(file_id, FAIL, "H5Fcreate");
-
-    /* Create first dataset & write data */
-    Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
-                          H5P_DEFAULT);
-    CHECK(Dataset1, FAIL, "H5Dcreate2");
-
-    ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1);
-    CHECK(ret, FAIL, "H5Dwrite");
-
-    /* Create second dataset */
-    Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
-                          H5P_DEFAULT);
-    CHECK(Dataset2, FAIL, "H5Dcreate2");
-
-    ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2);
-    CHECK(ret, FAIL, "H5Dwrite");
-
-    /* Create third dataset */
-    Dataset3 = H5Dcreate2(file_id, MISC14_DSET3_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
-                          H5P_DEFAULT);
-    CHECK(Dataset3, FAIL, "H5Dcreate2");
-
-    ret = H5Dwrite(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data3);
-    CHECK(ret, FAIL, "H5Dwrite");
-
-    /* Check data from first dataset */
-    ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
-    CHECK(ret, FAIL, "H5Dread");
-    if (!H5_DBL_ABS_EQUAL(rdata, data1))
-        TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__);
-
-    /* Check data from third dataset */
-    ret = H5Dread(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
-    CHECK(ret, FAIL, "H5Dread");
-    if (!H5_DBL_ABS_EQUAL(rdata, data3))
-        TestErrPrintf("Error on line %d: data3!=rdata\n", __LINE__);
-
-    /* Unlink second dataset */
-    ret = H5Ldelete(file_id, MISC14_DSET2_NAME, H5P_DEFAULT);
-    CHECK(ret, FAIL, "H5Ldelete");
-
-    /* Close second dataset */
-    ret = H5Dclose(Dataset2);
-    CHECK(ret, FAIL, "H5Dclose");
-
-    /* Verify the data from dataset #1 */
-    ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
-    CHECK(ret, FAIL, "H5Dread");
-    if (!H5_DBL_ABS_EQUAL(rdata, data1))
-        TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__);
-
-    /* Verify the data from dataset #3 */
-    ret = H5Dread(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
-    CHECK(ret, FAIL, "H5Dread");
-    if (!H5_DBL_ABS_EQUAL(rdata, data3))
-        TestErrPrintf("Error on line %d: data3!=rdata\n", __LINE__);
-
-    /* Close first dataset */
-    ret = H5Dclose(Dataset1);
-    CHECK(ret, FAIL, "H5Dclose");
-
-    /* Close third dataset */
-    ret = H5Dclose(Dataset3);
-    CHECK(ret, FAIL, "H5Dclose");
-
-    /*
Close the file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close shared objects (dataspace & fapl) */ - ret = H5Sclose(DataSpace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - -} /* end test_misc14() */ - -/**************************************************************** -** -** test_misc15(): Test that checking a file's access property list -** more than once correctly increments internal reference counts. -** -****************************************************************/ -static void -test_misc15(void) -{ - char filename[MISC15_BUF_SIZE]; - hid_t file; /* File ID */ - hid_t fapl; /* File access property list */ - herr_t ret; /* Generic return value */ - - fapl = h5_fileaccess(); - h5_fixname(MISC15_FILE, fapl, filename, MISC15_BUF_SIZE); - - /* Create the file & get it's FAPL */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(file, FAIL, "H5Fcreate"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - fapl = H5Fget_access_plist(file); - CHECK(fapl, FAIL, "H5Fget_access_plist"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file & get it's FAPL again */ - file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - - fapl = H5Fget_access_plist(file); - CHECK(fapl, FAIL, "H5Fget_access_plist"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Verify that the file is still OK */ - ret = H5Fis_accessible(filename, fapl); - CHECK(ret, FAIL, "H5Fis_accessible"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc15() */ - -/**************************************************************** -** -** test_misc16(): Test array of NULL-terminated -** fixed-length string. It creates a dataset of fixed-length -** strings. Each string is MISC16_STR_SIZE long. There are -** totally MISC16_SPACE_DIM by MISC16_SPACE_RANK strings. 
-** -****************************************************************/ -static void -test_misc16(void) -{ - hid_t file; /* File ID */ - herr_t ret; /* Generic return value */ - char wdata[MISC16_SPACE_DIM][MISC16_STR_SIZE]; - char rdata[MISC16_SPACE_DIM][MISC16_STR_SIZE]; /* Information read in */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[] = {MISC16_SPACE_DIM}; - int i; - - memset(wdata, 0, sizeof(wdata)); - memset(rdata, 0, sizeof(rdata)); - - /* Initialize the data */ - /* (Note that these are supposed to stress the code, so are a little weird) */ - memcpy(wdata[0], "1234567", MISC16_STR_SIZE); - memcpy(wdata[1], "1234567\0", MISC16_STR_SIZE); - memcpy(wdata[2], "12345678", MISC16_STR_SIZE); - memcpy(wdata[3], "\0\0\0\0\0\0\0\0", MISC16_STR_SIZE); - - /* Create the file */ - file = H5Fcreate(MISC16_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(MISC16_SPACE_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - - ret = H5Tset_size(tid, (size_t)MISC16_STR_SIZE); - CHECK(ret, FAIL, "H5Tset_size"); - - /*ret = H5Tset_strpad(tid,H5T_STR_NULLPAD); - CHECK(ret, FAIL, "H5Tset_strpad");*/ - - /* Create a dataset */ - dataset = H5Dcreate2(file, MISC16_DSET_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < MISC16_SPACE_DIM; i++) { - if (strlen(wdata[i]) != strlen(rdata[i])) { - TestErrPrintf( - "Line %u: VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", - (unsigned)__LINE__, (int)i, (int)strlen(wdata[i]), (int)i, (int)strlen(rdata[i])); - continue; - } /* end if */ - if (strcmp(wdata[i], rdata[i]) != 0) { - TestErrPrintf("Line %u: VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", - (unsigned)__LINE__, (int)i, wdata[i], (int)i, rdata[i]); - continue; - } /* end if */ - } /* end for */ - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc16() */ - -/**************************************************************** -** -** test_misc17(): Test array of characters. It creates a dataset -** of ASCII characters, with dimensionality of MISC17_SPACE_DIM1 -** by MISC17_SPACE_DIM2. 
-** -****************************************************************/ -static void -test_misc17(void) -{ - hid_t file; /* File ID */ - herr_t ret; /* Generic return value */ - char wdata[MISC17_SPACE_DIM1][MISC17_SPACE_DIM2]; - char rdata[MISC17_SPACE_DIM1][MISC17_SPACE_DIM2]; /* Information read in */ - hid_t dataset; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[] = {MISC17_SPACE_DIM1, MISC17_SPACE_DIM2}; - int i; - - memset(wdata, 0, sizeof(wdata)); - memset(rdata, 0, sizeof(rdata)); - - /* Initialize the data */ - /* (Note that these are supposed to stress the code, so are a little weird) */ - memcpy(wdata[0], "1234567", MISC17_SPACE_DIM2); - memcpy(wdata[1], "1234567\0", MISC17_SPACE_DIM2); - memcpy(wdata[2], "12345678", MISC17_SPACE_DIM2); - memcpy(wdata[3], "\0\0\0\0\0\0\0\0", MISC17_SPACE_DIM2); - - /* Create the file */ - file = H5Fcreate(MISC17_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(MISC17_SPACE_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - - ret = H5Tset_strpad(tid, H5T_STR_NULLPAD); - CHECK(ret, FAIL, "H5Tset_strpad"); - - /* Create a dataset */ - dataset = H5Dcreate2(file, MISC17_DSET_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data in the way of strings. */ - for (i = 0; i < MISC17_SPACE_DIM1; i++) { - if (strlen(wdata[i]) != strlen(rdata[i])) { - TestErrPrintf( - "Line %u: VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", - (unsigned)__LINE__, (int)i, (int)strlen(wdata[i]), (int)i, (int)strlen(rdata[i])); - continue; - } /* end if */ - if (strcmp(wdata[i], rdata[i]) != 0) { - TestErrPrintf("Line %u: VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", - (unsigned)__LINE__, (int)i, wdata[i], (int)i, rdata[i]); - continue; - } /* end if */ - } /* end for */ - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc17() */ - -/**************************************************************** -** -** test_misc18(): Test new object header information in H5O_info_t -** struct. 
-** -****************************************************************/ -static void -test_misc18(void) -{ - hid_t fid; /* File ID */ - hid_t sid; /* 'Space ID */ - hid_t did1, did2; /* Dataset IDs */ - hid_t aid; /* Attribute ID */ -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - H5O_info1_t old_oinfo; /* (deprecated) information about object */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -#endif - H5O_info2_t oinfo; /* Data model information about object */ -#if 0 - H5O_native_info_t ninfo; /* Native file format information about object */ -#endif - char attr_name[32]; /* Attribute name buffer */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Create the file */ - fid = H5Fcreate(MISC18_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for attributes */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create first dataset */ - did1 = H5Dcreate2(fid, MISC18_DSET1_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did1, FAIL, "H5Dcreate2"); - - /* Get object information */ - ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name"); -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); -#endif - - /* Create second dataset */ - did2 = H5Dcreate2(fid, MISC18_DSET2_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did2, FAIL, "H5Dcreate2"); - - /* Get object information */ - ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name"); -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); - 
VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); -#endif - - /* Loop creating attributes on each dataset, flushing them to the file each time */ - for (u = 0; u < 10; u++) { - /* Set up attribute name */ - snprintf(attr_name, sizeof(attr_name), "Attr %u", u); - - /* Create & close attribute on first dataset */ - aid = H5Acreate2(did1, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create & close attribute on second dataset */ - aid = H5Acreate2(did2, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Flush file, to 'fix' size of dataset object headers */ - ret = H5Fflush(fid, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - } /* end for */ - - /* Get object information for dataset #1 now */ - ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name"); -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); -#endif - - /* Get object information for dataset #2 now */ - ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name"); -#if 0 -#ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_mative_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); -#endif - - /* Close second dataset */ - ret = H5Dclose(did2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close first dataset */ - ret = H5Dclose(did1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - 
ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc18() */ - -/**************************************************************** -** -** test_misc19(): Test incrementing & decrementing ref count on IDs -** -****************************************************************/ -static void -test_misc19(void) -{ - hid_t fid = -1; /* File ID */ - hid_t sid = -1; /* Dataspace ID */ - hid_t did = -1; /* Dataset ID */ - hid_t tid = -1; /* Datatype ID */ - hid_t aid = -1; /* Attribute ID */ - hid_t plid = -1; /* Property List ID */ - hid_t pcid = -1; /* Property Class ID */ - hid_t gid = -1; /* Group ID */ - hid_t ecid = -1; /* Error Class ID */ - hid_t emid = -1; /* Error Message ID */ - hid_t esid = -1; /* Error Stack ID */ -#if 0 - hid_t vfdid = -1; /* Virtual File Driver ID */ - hid_t volid = -1; /* Virtual Object Layer ID */ - H5FD_class_t *vfd_cls = NULL; /* VFD class */ - H5VL_class_t *vol_cls = NULL; /* VOL class */ -#endif - int rc; /* Reference count */ - herr_t ret; /* Generic return value */ - - /* Check H5I operations on files */ - - /* Create the file */ - fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Check the reference count */ - rc = H5Iget_ref(fid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(fid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the file normally */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Check the reference count */ - rc = H5Iget_ref(fid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the file by decrementing the reference count */ - rc = H5Idec_ref(fid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the file again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Fclose(fid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fclose"); - - /* Check H5I operations on property lists */ - - /* Create the property list */ - plid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plid, FAIL, "H5Pcreate"); - - /* Check the reference count */ - rc = H5Iget_ref(plid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(plid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the property list normally */ - ret = H5Pclose(plid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Check the reference count */ - rc = H5Iget_ref(plid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the property list by decrementing the reference count */ - rc = H5Idec_ref(plid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the property list again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pclose(plid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pclose"); - - /* Check H5I operations on property classes */ - - /* Create a property class */ - pcid = H5Pcreate_class(H5P_DATASET_CREATE, "foo", NULL, NULL, NULL, NULL, NULL, NULL); - CHECK(pcid, FAIL, "H5Pcreate_class"); - - /* Check the reference count */ - rc = H5Iget_ref(pcid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(pcid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the property class normally */ - ret = H5Pclose_class(pcid); - CHECK(ret, FAIL, "H5Pclose_class"); - - /* Check the reference count */ - rc = H5Iget_ref(pcid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the property class by decrementing the reference count */ - rc = H5Idec_ref(pcid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the property class again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Pclose_class(pcid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Pclose_class"); - - /* Check H5I 
operations on datatypes */ - - /* Create a datatype */ - tid = H5Tcreate(H5T_OPAQUE, (size_t)16); - CHECK(tid, FAIL, "H5Tcreate"); - - /* Check the reference count */ - rc = H5Iget_ref(tid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(tid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the datatype normally */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Check the reference count */ - rc = H5Iget_ref(tid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the datatype by decrementing the reference count */ - rc = H5Idec_ref(tid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the datatype again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Tclose(tid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Tclose"); - - /* Check H5I operations on dataspaces */ - - /* Create a dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Check the reference count */ - rc = H5Iget_ref(sid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(sid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the dataspace normally */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Check the reference count */ - rc = H5Iget_ref(sid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the dataspace by decrementing the reference count */ - rc = H5Idec_ref(sid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the dataspace again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Sclose(sid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sclose"); - - /* Check H5I operations on datasets */ - - /* Create a file */ - fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset */ - did = H5Dcreate2(fid, MISC19_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Check the reference count */ - rc = H5Iget_ref(did); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(did); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the dataset normally */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Check the reference count */ - rc = H5Iget_ref(did); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the dataset by decrementing the reference count */ - rc = H5Idec_ref(did); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the dataset again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Dclose(did); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Check H5I operations on attributes */ - - /* Create a file */ - fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Open the root group */ - gid = H5Gopen2(fid, "/", H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create a dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create an attribute */ - aid = H5Acreate2(gid, MISC19_ATTR_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Check the reference count */ - rc = H5Iget_ref(aid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(aid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the dataset normally */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, 
"H5Aclose"); - - /* Check the reference count */ - rc = H5Iget_ref(aid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the attribute by decrementing the reference count */ - rc = H5Idec_ref(aid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the attribute again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Aclose(aid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aclose"); - - /* Close the root group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Check H5I operations on groups */ - - /* Create a file */ - fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a group */ - gid = H5Gcreate2(fid, MISC19_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Check the reference count */ - rc = H5Iget_ref(gid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(gid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the group normally */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Check the reference count */ - rc = H5Iget_ref(gid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the group by decrementing the reference count */ - rc = H5Idec_ref(gid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the group again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Gclose(gid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Gclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Check H5I operations on error classes */ - - /* Create an error class */ - ecid = H5Eregister_class("foo", "bar", "baz"); - CHECK(ecid, FAIL, "H5Eregister_class"); - - /* Check the reference count */ - rc = H5Iget_ref(ecid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(ecid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the error class normally */ - ret = H5Eunregister_class(ecid); - CHECK(ret, FAIL, "H5Eunregister_class"); - - /* Check the reference count */ - rc = H5Iget_ref(ecid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the error class by decrementing the reference count */ - rc = H5Idec_ref(ecid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the error class again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Eunregister_class(ecid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Eunregister_class"); - - /* Check H5I operations on error messages */ - - /* Create an error class */ - ecid = H5Eregister_class("foo", "bar", "baz"); - CHECK(ecid, FAIL, "H5Eregister_class"); - - /* Create an error message */ - emid = H5Ecreate_msg(ecid, H5E_MAJOR, "mumble"); - CHECK(emid, FAIL, "H5Ecreate_msg"); - - /* Check the reference count */ - rc = H5Iget_ref(emid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(emid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the error message normally */ - ret = H5Eclose_msg(emid); - CHECK(ret, FAIL, "H5Eclose_msg"); - - /* Check the reference count */ - rc = H5Iget_ref(emid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the error message by decrementing the reference count */ - rc = H5Idec_ref(emid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the error message again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Eclose_msg(emid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Eclose_msg"); - - /* Close the error class */ - ret = H5Eunregister_class(ecid); - CHECK(ret, 
FAIL, "H5Eunregister_class"); - - /* Check H5I operations on error stacks */ - - /* Create an error stack */ - esid = H5Eget_current_stack(); - CHECK(esid, FAIL, "H5Eget_current_stack"); - - /* Check the reference count */ - rc = H5Iget_ref(esid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Inc the reference count */ - rc = H5Iinc_ref(esid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Close the error stack normally */ - ret = H5Eclose_stack(esid); - CHECK(ret, FAIL, "H5Eclose_stack"); - - /* Check the reference count */ - rc = H5Iget_ref(esid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Close the error stack by decrementing the reference count */ - rc = H5Idec_ref(esid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try closing the error stack again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Eclose_stack(esid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Eclose_stack"); - -#if 0 - /* Check H5I operations on virtual file drivers */ - - /* Get a VFD class to register */ - vfd_cls = h5_get_dummy_vfd_class(); - CHECK_PTR(vfd_cls, "h5_get_dummy_vfd_class"); - - /* Register a virtual file driver */ - vfdid = H5FDregister(vfd_cls); - CHECK(vfdid, FAIL, "H5FDregister"); - - /* Check the reference count */ - rc = H5Iget_ref(vfdid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Increment the reference count */ - rc = H5Iinc_ref(vfdid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Unregister the VFD normally */ - ret = H5FDunregister(vfdid); - CHECK(ret, FAIL, "H5FDunregister"); - - /* Check the reference count */ - rc = H5Iget_ref(vfdid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Unregister the VFD by decrementing the reference count */ - rc = H5Idec_ref(vfdid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try unregistering the VFD again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5FDunregister(vfdid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5FDunregister"); - - free(vfd_cls); - - /* Check H5I operations on virtual object connectors */ - - /* Get a VOL class to register */ - vol_cls = h5_get_dummy_vol_class(); - CHECK_PTR(vol_cls, "h5_get_dummy_vol_class"); - - /* Register a VOL connector */ - volid = H5VLregister_connector(vol_cls, H5P_DEFAULT); - CHECK(volid, FAIL, "H5VLregister_connector"); - - /* Check the reference count */ - rc = H5Iget_ref(volid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Increment the reference count */ - rc = H5Iinc_ref(volid); - VERIFY(rc, 2, "H5Iinc_ref"); - - /* Unregister the VOL connector normally */ - ret = H5VLunregister_connector(volid); - CHECK(ret, FAIL, "H5VLunregister_connector"); - - /* Check the reference count */ - rc = H5Iget_ref(volid); - VERIFY(rc, 1, "H5Iget_ref"); - - /* Unregister the VOL connector by decrementing the reference count */ - rc = H5Idec_ref(volid); - VERIFY(rc, 0, "H5Idec_ref"); - - /* Try unregistering the VOL connector again (should fail) */ - H5E_BEGIN_TRY - { - ret = H5VLunregister_connector(volid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5VLunregister_connector"); - - free(vol_cls); -#endif -} /* end test_misc19() */ - -/**************************************************************** -** -** test_misc20(): Test problems with version 2 of storage layout -** message truncating dimensions -** -****************************************************************/ -#if 0 -static void -test_misc20(void) -{ - hid_t fid; /* File ID */ - hid_t sid; /* 'Space ID */ - hid_t did; /* Dataset ID */ - hid_t dcpl; /* Dataset creation property list ID */ - int rank = MISC20_SPACE_RANK; /* Rank of dataspace */ - hsize_t big_dims[MISC20_SPACE_RANK] = {MISC20_SPACE_DIM0, MISC20_SPACE_DIM1}; /* Large 
dimensions */ - hsize_t small_dims[MISC20_SPACE_RANK] = {MISC20_SPACE2_DIM0, MISC20_SPACE2_DIM1}; /* Small dimensions */ - unsigned version; /* Version of storage layout info */ - hsize_t contig_size; /* Size of contiguous storage size from layout into */ - const char *testfile = H5_get_srcdir_filename(MISC20_FILE_OLD); /* Corrected test file name */ - bool driver_is_default_compatible; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing large dimension truncation fix\n")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - /* Verify that chunks with dimensions that are too large get rejected */ - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Try to use chunked storage for this dataset */ - ret = H5Pset_chunk(dcpl, rank, big_dims); - VERIFY(ret, FAIL, "H5Pset_chunk"); - - /* Verify that the storage for the dataset is the correct size and hasn't - * been truncated. - */ - - /* Create the file */ - fid = H5Fcreate(MISC20_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace with _really_ big dimensions */ - sid = H5Screate_simple(rank, big_dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Make certain that the dataset's storage doesn't get allocated :-) */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create dataset with big dataspace */ - did = H5Dcreate2(fid, MISC20_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Close datasset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace with small dimensions */ - sid = H5Screate_simple(rank, small_dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create dataset with big dataspace */ - did = H5Dcreate2(fid, MISC20_DSET2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Close datasset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid = H5Fopen(MISC20_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open dataset with big dimensions */ - did = H5Dopen2(fid, MISC20_DSET_NAME, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Get the layout version */ - ret = H5D__layout_version_test(did, &version); - CHECK(ret, FAIL, "H5D__layout_version_test"); - VERIFY(version, 3, "H5D__layout_version_test"); - - /* Get the layout contiguous storage size */ - ret = H5D__layout_contig_size_test(did, &contig_size); - CHECK(ret, FAIL, "H5D__layout_contig_size_test"); - VERIFY(contig_size, (MISC20_SPACE_DIM0 * MISC20_SPACE_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5D__layout_contig_size_test"); - - /* Close datasset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open dataset with small dimensions */ - did = H5Dopen2(fid, MISC20_DSET2_NAME, H5P_DEFAULT); - 
CHECK(did, FAIL, "H5Dopen2"); - - /* Get the layout version */ - ret = H5D__layout_version_test(did, &version); - CHECK(ret, FAIL, "H5D__layout_version_test"); - VERIFY(version, 3, "H5D__layout_version_test"); - - /* Get the layout contiguous storage size */ - ret = H5D__layout_contig_size_test(did, &contig_size); - CHECK(ret, FAIL, "H5D__layout_contig_size_test"); - VERIFY(contig_size, (MISC20_SPACE2_DIM0 * MISC20_SPACE2_DIM1 * H5Tget_size(H5T_NATIVE_INT)), - "H5D__layout_contig_size_test"); - - /* Close datasset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Verify that the storage size is computed correctly for older versions of layout info */ - - /* - * Open the old file and the dataset and get old settings. - */ - fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open dataset with small dimensions */ - did = H5Dopen2(fid, MISC20_DSET_NAME, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); - - /* Get the layout version */ - ret = H5D__layout_version_test(did, &version); - CHECK(ret, FAIL, "H5D__layout_version_test"); - VERIFY(version, 2, "H5D__layout_version_test"); - - /* Get the layout contiguous storage size */ - ret = H5D__layout_contig_size_test(did, &contig_size); - CHECK(ret, FAIL, "H5D__layout_contig_size_test"); - VERIFY(contig_size, (MISC20_SPACE_DIM0 * MISC20_SPACE_DIM1 * H5Tget_size(H5T_STD_I32LE)), - "H5D__layout_contig_size_test"); - - /* Close datasset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_misc20() */ -#endif - -/* - test_misc21 and test_misc22 should be executed when SZIP is present - and encoder is available. - EIP 2004/8/04 -*/ -#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) - -/**************************************************************** -** -** test_misc21(): Test that late allocation time is treated the same -** as incremental allocation time, for chunked datasets -** when overwriting entire dataset where the chunks -** don't exactly match the dataspace. 
-** -****************************************************************/ -static void -test_misc21(void) -{ - hid_t fid, sid, dcpl, dsid; - char *buf; - hsize_t dims[2] = {MISC21_SPACE_DIM0, MISC21_SPACE_DIM1}, - chunk_size[2] = {MISC21_CHUNK_DIM0, MISC21_CHUNK_DIM1}; - herr_t ret; /* Generic return value */ - - if (h5_szip_can_encode() != 1) - return; - /* Output message about test being performed */ - MESSAGE(5, ("Testing late allocation time w/chunks & filters\n")); - - /* Allocate space for the buffer */ - buf = (char *)calloc(MISC21_SPACE_DIM0 * MISC21_SPACE_DIM1, 1); - CHECK(buf, NULL, "calloc"); - - /* Create the file */ - fid = H5Fcreate(MISC21_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create the DCPL */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set custom DCPL properties */ - ret = H5Pset_chunk(dcpl, MISC21_SPACE_RANK, chunk_size); - CHECK(ret, FAIL, "H5Pset_chunk"); - ret = H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 8); - CHECK(ret, FAIL, "H5Pset_deflate"); - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - /* Create the dataspace for the dataset */ - sid = H5Screate_simple(MISC21_SPACE_RANK, dims, NULL); - CHECK(ret, FAIL, "H5Screate_simple"); - - /* Create the dataset */ - dsid = H5Dcreate2(fid, MISC21_DSET_NAME, H5T_NATIVE_UINT8, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dcreate2"); - - /* Write out the whole dataset */ - ret = H5Dwrite(dsid, H5T_NATIVE_UINT8, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close everything */ - ret = H5Dclose(dsid); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - free(buf); -} /* end test_misc21() */ - -/**************************************************************** -** -** test_misc22(): Test SZIP bits-per-pixel parameter. -** This should be set according to the datatype. -** Tests for precision and offset combo's. 
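The next two tests depend on the SZIP filter adjusting its pixels-per-block to the precision of the dataset's datatype. As a stand-alone reference, a minimal sketch of that interaction follows; it is not part of the patch, the file and dataset names are made up, error checking is omitted, and it assumes a library built with the SZIP encoder available.

    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t dims[2]  = {100, 100};
        hsize_t chunk[2] = {10, 10};
        hid_t   fid, sid, dcpl, dtype, did;

        fid   = H5Fcreate("szip_precision_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        sid   = H5Screate_simple(2, dims, NULL);
        dcpl  = H5Pcreate(H5P_DATASET_CREATE);
        dtype = H5Tcopy(H5T_NATIVE_UINT32);

        H5Pset_chunk(dcpl, 2, chunk);
        H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 32); /* requested pixels-per-block */
        H5Tset_precision(dtype, 19);                   /* filter should clamp to this precision */

        did = H5Dcreate2(fid, "dset", dtype, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Dclose(did);
        H5Tclose(dtype);
        H5Pclose(dcpl);
        H5Sclose(sid);
        H5Fclose(fid);
        return 0;
    }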
-** -****************************************************************/ -static void -test_misc22(void) -{ - hid_t fid, sid, dcpl, dsid, dcpl2; - char *buf; - hsize_t dims[2] = {MISC22_SPACE_DIM0, MISC22_SPACE_DIM1}, - chunk_size[2] = {MISC22_CHUNK_DIM0, MISC22_CHUNK_DIM1}; - herr_t ret; /* Generic return value */ - hid_t dtype; - /* should extend test to signed ints */ - hid_t idts[4]; - /* do the same for floats - hid_t fdts[2]={H5T_NATIVE_FLOAT32, - H5T_NATIVE_FLOAT64} - */ - size_t prec[4] = {3, 11, 19, 27}; - size_t offsets[5] = {0, 3, 11, 19, 27}; - int i, j, k; - unsigned int flags; - size_t cd_nelmts = 32; - unsigned int cd_values[32]; - size_t correct; - - if (h5_szip_can_encode() != 1) - return; - idts[0] = H5Tcopy(H5T_NATIVE_UINT8); - idts[1] = H5Tcopy(H5T_NATIVE_UINT16); - idts[2] = H5Tcopy(H5T_NATIVE_UINT32); - idts[3] = H5Tcopy(H5T_NATIVE_UINT64); - - /* Output message about test being performed */ - MESSAGE(5, ("Testing datatypes with SZIP filter\n")); - - /* Allocate space for the buffer */ - buf = (char *)calloc(MISC22_SPACE_DIM0 * MISC22_SPACE_DIM1, 8); - CHECK(buf, NULL, "calloc"); - - /* Create the file */ - fid = H5Fcreate(MISC22_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create the dataspace for the dataset */ - sid = H5Screate_simple(MISC22_SPACE_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - for (i = 0; i < 4; i++) { - for (j = 0; j < 4; j++) { - if (prec[j] > (H5Tget_size(idts[i]) * 8)) - continue; /* skip irrelevant combination */ - for (k = 0; k < 5; k++) { - if (offsets[k] > (H5Tget_size(idts[i]) * 8)) - continue; /* skip irrelevant combinations */ - if ((prec[j] + offsets[k]) > (H5Tget_size(idts[i]) * 8)) - continue; - - MESSAGE(5, (" Testing datatypes size=%zu precision=%u offset=%d\n", H5Tget_size(idts[i]), - (unsigned)prec[j], (unsigned)offsets[k])); - - /* Create the DCPL */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set DCPL properties */ - ret = H5Pset_chunk(dcpl, MISC22_SPACE_RANK, chunk_size); - CHECK(ret, FAIL, "H5Pset_chunk"); - /* Set custom DCPL properties */ - ret = H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 32); /* vary the PPB */ - CHECK(ret, FAIL, "H5Pset_szip"); - - /* set up the datatype according to the loop */ - dtype = H5Tcopy(idts[i]); - CHECK(dtype, FAIL, "H5Tcopy"); - ret = H5Tset_precision(dtype, prec[j]); - CHECK(ret, FAIL, "H5Tset_precision"); - ret = H5Tset_offset(dtype, offsets[k]); - CHECK(ret, FAIL, "H5Tset_precision"); - - /* compute the correct PPB that should be set by SZIP */ - if (offsets[k] == 0) - correct = prec[j]; - else - correct = H5Tget_size(idts[i]) * 8; - if (correct > 24) { - if (correct <= 32) - correct = 32; - else if (correct <= 64) - correct = 64; - } /* end if */ - - /* Create the dataset */ - dsid = H5Dcreate2(fid, MISC22_DSET_NAME, dtype, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dcreate2"); - - /* Write out the whole dataset */ - ret = H5Dwrite(dsid, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close everything */ - ret = H5Dclose(dsid); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - dsid = H5Dopen2(fid, MISC22_DSET_NAME, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dopen2"); - - dcpl2 = H5Dget_create_plist(dsid); - CHECK(dcpl2, FAIL, "H5Dget_create_plist"); - - ret = H5Pget_filter_by_id2(dcpl2, H5Z_FILTER_SZIP, &flags, &cd_nelmts, cd_values, 0, NULL, - NULL); - 
CHECK(ret, FAIL, "H5Pget_filter_by_id2"); - - VERIFY(cd_values[2], (unsigned)correct, "SZIP filter returned value for precision"); - - ret = H5Dclose(dsid); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Ldelete(fid, MISC22_DSET_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - ret = H5Pclose(dcpl2); - CHECK(ret, FAIL, "H5Pclose"); - } - } - } - ret = H5Tclose(idts[0]); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(idts[1]); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(idts[2]); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(idts[3]); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - free(buf); -} /* end test_misc22() */ -#endif /* H5_HAVE_FILTER_SZIP */ - -/**************************************************************** -** -** test_misc23(): Test intermediate group creation. -** -****************************************************************/ -static void -test_misc23(void) -{ - hsize_t dims[] = {10}; - hid_t file_id = 0, group_id = 0, type_id = 0, space_id = 0, tmp_id = 0, create_id = H5P_DEFAULT, - access_id = H5P_DEFAULT; - char objname[MISC23_NAME_BUF_SIZE]; /* Name of object */ - H5O_info2_t oinfo; - htri_t tri_status; - ssize_t namelen; - herr_t status; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing intermediate group creation\n")); - - /* Create a new file using default properties. */ - file_id = H5Fcreate(MISC23_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Build some infrastructure */ - group_id = H5Gcreate2(file_id, "/A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate2"); - - space_id = H5Screate_simple(1, dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - type_id = H5Tcopy(H5T_STD_I32BE); - CHECK(type_id, FAIL, "H5Tcopy"); - -#ifndef H5_NO_DEPRECATED_SYMBOLS - /********************************************************************** - * test the old APIs - **********************************************************************/ - - H5E_BEGIN_TRY - { - tmp_id = H5Gcreate1(file_id, "/A/B00a/grp", (size_t)0); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gcreate1"); - - /* Make sure that size_hint values that can't fit into a 32-bit - * unsigned integer are rejected. Only necessary on systems where - * size_t is a 64-bit type. 
- */ - if (SIZE_MAX > UINT32_MAX) { - H5E_BEGIN_TRY - { - tmp_id = H5Gcreate1(file_id, "/size_hint_too_large", SIZE_MAX); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gcreate1"); - } - - /* Make sure the largest size_hint value works */ - H5E_BEGIN_TRY - { - tmp_id = H5Gcreate1(file_id, "/largest_size_hint", UINT32_MAX); - } - H5E_END_TRY - CHECK(tmp_id, FAIL, "H5Gcreate1"); - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - tmp_id = H5Gcreate1(file_id, "/A/grp", (size_t)0); - CHECK(tmp_id, FAIL, "H5Gcreate1"); - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dcreate1(file_id, "/A/B00c/dset", type_id, space_id, create_id); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dcreate1"); - - tmp_id = H5Dcreate1(file_id, "/A/dset", type_id, space_id, create_id); - CHECK(tmp_id, FAIL, "H5Dcreate1"); - status = H5Dclose(tmp_id); - CHECK(status, FAIL, "H5Dclose"); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - - /********************************************************************** - * test H5Gcreate2() - **********************************************************************/ - - /* Create link creation property list */ - create_id = H5Pcreate(H5P_LINK_CREATE); - CHECK(create_id, FAIL, "H5Pcreate"); - - /* Set flag for intermediate group creation */ - status = H5Pset_create_intermediate_group(create_id, true); - CHECK(status, FAIL, "H5Pset_create_intermediate_group"); - - tmp_id = H5Gcreate2(file_id, "/A/B01/grp", create_id, H5P_DEFAULT, access_id); - CHECK(tmp_id, FAIL, "H5Gcreate2"); - - /* Query that the name of the new group is correct */ - namelen = H5Iget_name(tmp_id, objname, (size_t)MISC23_NAME_BUF_SIZE); - CHECK(namelen, FAIL, "H5Iget_name"); - VERIFY_STR(objname, "/A/B01/grp", "H5Iget_name"); - - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - /* Check that intermediate group is set up correctly */ - tmp_id = H5Gopen2(file_id, "/A/B01", H5P_DEFAULT); - CHECK(tmp_id, FAIL, "H5Gopen2"); - - status = H5Oget_info3(tmp_id, &oinfo, H5O_INFO_BASIC); - CHECK(status, FAIL, "H5Oget_info3"); - VERIFY(oinfo.rc, 1, "H5Oget_info3"); - - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - tmp_id = H5Gcreate2(file_id, "/A/B02/C02/grp", create_id, H5P_DEFAULT, access_id); - CHECK(tmp_id, FAIL, "H5Gcreate2"); - - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - tmp_id = H5Gcreate2(group_id, "B03/grp/", create_id, H5P_DEFAULT, access_id); - CHECK(tmp_id, FAIL, "H5Gcreate2"); - - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - tmp_id = H5Gcreate2(group_id, "/A/B04/grp/", create_id, H5P_DEFAULT, access_id); - CHECK(tmp_id, FAIL, "H5Gcreate2"); - - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - tmp_id = H5Gcreate2(file_id, "/A/B05/C05/A", create_id, H5P_DEFAULT, access_id); - CHECK(tmp_id, FAIL, "H5Gcreate2"); - - status = H5Gclose(tmp_id); - CHECK(status, FAIL, "H5Gclose"); - - status = H5Pclose(create_id); - CHECK(status, FAIL, "H5Pclose"); - - /********************************************************************** - * test H5Dcreate2() - **********************************************************************/ - - /* Create link creation property list */ - create_id = H5Pcreate(H5P_LINK_CREATE); - CHECK(create_id, FAIL, "H5Pcreate"); - - /* Set flag for intermediate group creation */ - status = H5Pset_create_intermediate_group(create_id, true); - CHECK(status, FAIL, "H5Pset_create_intermediate_group"); - - tmp_id = H5Dcreate2(file_id, "/A/B06/dset", type_id, space_id, 
create_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(tmp_id, FAIL, "H5Dcreate2"); - - status = H5Dclose(tmp_id); - CHECK(status, FAIL, "H5Dclose"); - - tmp_id = H5Dcreate2(file_id, "/A/B07/B07/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(tmp_id, FAIL, "H5Dcreate2"); - - status = H5Dclose(tmp_id); - CHECK(status, FAIL, "H5Dclose"); - - tmp_id = H5Dcreate2(group_id, "B08/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(tmp_id, FAIL, "H5Dcreate2"); - - status = H5Dclose(tmp_id); - CHECK(status, FAIL, "H5Dclose"); - - tmp_id = H5Dcreate2(group_id, "/A/B09/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(tmp_id, FAIL, "H5Dcreate2"); - - status = H5Dclose(tmp_id); - CHECK(status, FAIL, "H5Dclose"); - - tmp_id = H5Dcreate2(file_id, "/A/B10/C10/A/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(tmp_id, FAIL, "H5Dcreate2"); - - status = H5Dclose(tmp_id); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Tclose(type_id); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Sclose(space_id); - CHECK(status, FAIL, "H5Sclose"); - - status = H5Pclose(create_id); - CHECK(status, FAIL, "H5Pclose"); - - /********************************************************************** - * test H5Tcommit2() - **********************************************************************/ - - /* Create link creation property list */ - create_id = H5Pcreate(H5P_LINK_CREATE); - CHECK(create_id, FAIL, "H5Pcreate"); - - /* Set flag for intermediate group creation */ - status = H5Pset_create_intermediate_group(create_id, true); - CHECK(status, FAIL, "H5Pset_create_intermediate_group"); - - tmp_id = H5Tcopy(H5T_NATIVE_INT16); - CHECK(tmp_id, FAIL, "H5Tcopy"); - - status = H5Tcommit2(file_id, "/A/B11/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); - CHECK(status, FAIL, "H5Tcommit2"); - - status = H5Tclose(tmp_id); - CHECK(status, FAIL, "H5Tclose"); - - tmp_id = H5Tcopy(H5T_NATIVE_INT32); - CHECK(tmp_id, FAIL, "H5Tcopy"); - - status = H5Tcommit2(file_id, "/A/B12/C12/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); - CHECK(status, FAIL, "H5Tcommit2"); - - status = H5Tclose(tmp_id); - CHECK(status, FAIL, "H5Tclose"); - - tmp_id = H5Tcopy(H5T_NATIVE_INT64); - CHECK(tmp_id, FAIL, "H5Tcopy"); - - status = H5Tcommit2(group_id, "B13/C12/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); - CHECK(status, FAIL, "H5Tcommit2"); - - status = H5Tclose(tmp_id); - CHECK(status, FAIL, "H5Tclose"); - - tmp_id = H5Tcopy(H5T_NATIVE_FLOAT); - CHECK(tmp_id, FAIL, "H5Tcopy"); - - status = H5Tcommit2(group_id, "/A/B14/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); - CHECK(status, FAIL, "H5Tcommit2"); - - status = H5Tclose(tmp_id); - CHECK(status, FAIL, "H5Tclose"); - - tmp_id = H5Tcopy(H5T_NATIVE_DOUBLE); - CHECK(tmp_id, FAIL, "H5Tcopy"); - - status = H5Tcommit2(file_id, "/A/B15/C15/A/dtype", tmp_id, create_id, H5P_DEFAULT, access_id); - CHECK(status, FAIL, "H5Tcommit2"); - - status = H5Tclose(tmp_id); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Pclose(create_id); - CHECK(status, FAIL, "H5Pclose"); - - /********************************************************************** - * test H5Lcopy() - **********************************************************************/ - - /* Create link creation property list */ - create_id = H5Pcreate(H5P_LINK_CREATE); - CHECK(create_id, FAIL, "H5Pcreate"); - - /* Set flag for intermediate group creation */ - status = H5Pset_create_intermediate_group(create_id, true); - CHECK(status, FAIL, 
"H5Pset_create_intermediate_group"); - - status = H5Lcopy(file_id, "/A/B01/grp", file_id, "/A/B16/grp", create_id, access_id); - CHECK(status, FAIL, "H5Lcopy"); - - tri_status = H5Lexists(file_id, "/A/B16/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - - tri_status = H5Lexists(file_id, "/A/B01/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - - /********************************************************************** - * test H5Lmove() - **********************************************************************/ - - status = H5Lmove(file_id, "/A/B16/grp", file_id, "/A/B17/grp", create_id, access_id); - CHECK(status, FAIL, "H5Lmove"); - - tri_status = H5Lexists(file_id, "/A/B17/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - - tri_status = H5Lexists(file_id, "/A/B16/grp", access_id); - VERIFY(tri_status, false, "H5Lexists"); - - /********************************************************************** - * test H5Lcreate_hard() - **********************************************************************/ - - status = H5Lcreate_hard(file_id, "/A/B01/grp", file_id, "/A/B18/grp", create_id, access_id); - CHECK(status, FAIL, "H5Lcreate_hard"); - - tri_status = H5Lexists(file_id, "/A/B18/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - - /********************************************************************** - * test H5Lcreate_soft() - **********************************************************************/ - - status = H5Lcreate_soft("/A/B01/grp", file_id, "/A/B19/grp", create_id, access_id); - CHECK(status, FAIL, "H5Lcreate_soft"); - - tri_status = H5Lexists(file_id, "/A/B19/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - - /********************************************************************** - * test H5Lcreate_external() - **********************************************************************/ - - if (vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) { - status = - H5Lcreate_external("fake_filename", "fake_path", file_id, "/A/B20/grp", create_id, access_id); - CHECK(status, FAIL, "H5Lcreate_external"); - - tri_status = H5Lexists(file_id, "/A/B20/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - } - - /********************************************************************** - * test H5Lcreate_ud() - **********************************************************************/ - - if (vol_cap_flags_g & H5VL_CAP_FLAG_UD_LINKS) { - status = H5Lcreate_ud(file_id, "/A/B21/grp", H5L_TYPE_EXTERNAL, "file\0obj", (size_t)9, create_id, - access_id); - CHECK(status, FAIL, "H5Lcreate_ud"); - - tri_status = H5Lexists(file_id, "/A/B21/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); - } - - /********************************************************************** - * close - **********************************************************************/ - - status = H5Pclose(create_id); - CHECK(status, FAIL, "H5Pclose"); - - status = H5Gclose(group_id); - CHECK(status, FAIL, "H5Gclose"); - - status = H5Fclose(file_id); - CHECK(status, FAIL, "H5Fclose"); - -} /* end test_misc23() */ - -/**************************************************************** -** -** test_misc24(): Test opening objects with inappropriate APIs -** -****************************************************************/ -static void -test_misc24(void) -{ -#if 0 - hid_t file_id = 0, group_id = 0, type_id = 0, space_id = 0, dset_id = 0, tmp_id = 0; - herr_t ret; /* Generic return value */ -#endif - - /* Output message about test being performed */ - MESSAGE(5, - ("Testing opening objects with 
inappropriate APIs - SKIPPED due to causing problems in HDF5\n")); -#if 0 - /* Create a new file using default properties. */ - file_id = H5Fcreate(MISC24_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - - /* Create group, dataset & named datatype objects */ - group_id = H5Gcreate2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate2"); - - dset_id = H5Dcreate2(file_id, MISC24_DATASET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - type_id = H5Tcopy(H5T_NATIVE_INT); - CHECK(type_id, FAIL, "H5Tcopy"); - - ret = H5Tcommit2(file_id, MISC24_DATATYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create soft links to the objects created */ - ret = H5Lcreate_soft(MISC24_GROUP_NAME, file_id, MISC24_GROUP_LINK, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - - ret = H5Lcreate_soft(MISC24_DATASET_NAME, file_id, MISC24_DATASET_LINK, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - - ret = H5Lcreate_soft(MISC24_DATATYPE_NAME, file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_soft"); - - /* Close IDs for objects */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Attempt to open each kind of object with wrong API, including using soft links */ - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - /* Try again, with the object already open through valid call */ - /* Open group */ - group_id = H5Gopen2(file_id, 
MISC24_GROUP_NAME, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open dataset */ - dset_id = H5Dopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Topen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Topen2"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open named datatype */ - type_id = H5Topen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Topen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Gopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - H5E_BEGIN_TRY - { - tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(tmp_id, FAIL, "H5Dopen2"); - - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close file */ - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); -#endif -} /* end test_misc24() */ - -/**************************************************************** -** -** test_misc25a(): Exercise null object header message merge bug -** with new file -** -****************************************************************/ -static void -test_misc25a(void) -{ - hid_t fid; /* File ID */ - hid_t gid, gid2, gid3; /* Group IDs */ - hid_t aid; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Exercise null object header message bug\n")); - - /* Create file */ - fid = H5Fcreate(MISC25A_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create top group */ - gid = H5Gcreate2(fid, MISC25A_GROUP0_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Close top group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create first group */ - gid = H5Gcreate2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create second group */ - gid2 = H5Gcreate2(fid, MISC25A_GROUP2_NAME, H5P_DEFAULT, 
H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gcreate2"); - - /* Close second group */ - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open first group */ - gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create dataspace for attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datatype for attribute */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid, (size_t)MISC25A_ATTR1_LEN); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Add 1st attribute on first group */ - aid = H5Acreate2(gid, MISC25A_ATTR1_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create dataspace for 2nd attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datatype for attribute */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Add 2nd attribute on first group */ - aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close 2nd attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create third group */ - gid3 = H5Gcreate2(fid, MISC25A_GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid3, FAIL, "H5Gcreate2"); - - /* Close third group */ - ret = H5Gclose(gid3); - CHECK(ret, FAIL, "H5Gclose"); - - /* Re-open first group */ - gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Delete 2nd attribute */ - ret = H5Adelete(gid, MISC25A_ATTR2_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open first group */ - gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create dataspace for 3rd attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datatype for attribute */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid, (size_t)MISC25A_ATTR3_LEN); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Add 3rd attribute on first group (smaller than 2nd attribute) */ - aid = H5Acreate2(gid, MISC25A_ATTR3_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ 
- ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close 3rd attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open first group */ - gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Delete 3rd attribute */ - ret = H5Adelete(gid, MISC25A_ATTR3_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Create dataspace for 3rd attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datatype for attribute */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Re-create 2nd attribute on first group */ - aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close 2nd attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open first group */ - gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Delete 2nd attribute */ - ret = H5Adelete(gid, MISC25A_ATTR2_NAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file */ - fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open first group */ - gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Create dataspace for 3rd attribute */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create datatype for attribute */ - tid = H5Tcopy(H5T_C_S1); - CHECK(tid, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Re-create 2nd attribute on first group */ - aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close 2nd attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc25a() */ - -/**************************************************************** -** -** test_misc25b(): Exercise null object header message merge bug -** with existing file (This test relies on -** the file produced by test/gen_mergemsg.c) -** -****************************************************************/ -#if 0 -static void -test_misc25b(void) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - const char *testfile = 
H5_get_srcdir_filename(MISC25B_FILE); /* Corrected test file name */ - bool driver_is_default_compatible; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Exercise null object header message bug\n")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - /* Open file */ - fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open group with object header messages that will merge */ - gid = H5Gopen2(fid, MISC25B_GROUP, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Close first group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc25b() */ -#endif - -/**************************************************************** -** -** test_misc25c(): Exercise another null object header message merge bug. -** -****************************************************************/ -static void -test_misc25c(void) -{ - hid_t fid; /* File ID */ - hid_t fapl; /* File access property list ID */ - hid_t gcpl; /* Group creation property list ID */ - hid_t sid; /* Dataspace ID */ - hid_t did; /* Dataset ID */ - hid_t gid; /* Group ID */ - hid_t gid2; /* Group ID */ - hid_t aid; /* Attribute ID */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Exercise another null object header message bug\n")); - - /* Compose file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create the file */ - fid = H5Fcreate(MISC25C_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Compose group creation property list */ - gcpl = H5Pcreate(H5P_GROUP_CREATE); - CHECK(gcpl, FAIL, "H5Pcreate"); - ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_link_creation_order"); - ret = H5Pset_attr_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - CHECK(ret, FAIL, "H5Pset_attr_creation_order"); - ret = H5Pset_est_link_info(gcpl, 1, 18); - CHECK(ret, FAIL, "H5Pset_est_link_info"); - - /* Create a group for the dataset */ - gid = H5Gcreate2(fid, MISC25C_DSETGRPNAME, H5P_DEFAULT, gcpl, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - /* Create the dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create the dataset */ - did = H5Dcreate2(gid, MISC25C_DSETNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Create an extra group */ - gid2 = H5Gcreate2(fid, MISC25C_GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gcreate2"); - - /* Close the extra group */ - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Add an attribute to the dataset group */ - aid = H5Acreate2(gid, MISC25C_ATTRNAME, H5T_NATIVE_CHAR, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create a second extra group */ - gid2 = H5Gcreate2(fid, MISC25C_GRPNAME2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, FAIL, "H5Gcreate2"); 
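The group creation property list that test_misc25c builds (creation-order tracking and indexing for both links and attributes) can be shown in isolation. The sketch below is not part of the patch; the file and group names are illustrative and error checking is omitted.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t fid, gcpl, gid;

        fid  = H5Fcreate("creation_order_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        gcpl = H5Pcreate(H5P_GROUP_CREATE);

        /* Track and index both link and attribute creation order */
        H5Pset_link_creation_order(gcpl, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);
        H5Pset_attr_creation_order(gcpl, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);

        gid = H5Gcreate2(fid, "tracked_group", H5P_DEFAULT, gcpl, H5P_DEFAULT);

        H5Gclose(gid);
        H5Pclose(gcpl);
        H5Fclose(fid);
        return 0;
    }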
- - /* Close the second extra group */ - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Add second attribute to the dataset group */ - aid = H5Acreate2(gid, MISC25C_ATTRNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(aid, FAIL, "H5Acreate2"); - - /* Close the attribute */ - ret = H5Aclose(aid); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the dataset group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Close the property lists */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Pclose(gcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Re-open the file */ - fid = H5Fopen(MISC25C_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Re-open the dataset group */ - gid = H5Gopen2(fid, MISC25C_DSETGRPNAME, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Rename the dataset */ - ret = H5Lmove(gid, MISC25C_DSETNAME, H5L_SAME_LOC, MISC25C_DSETNAME2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lmove"); - - /* Delete the first attribute */ - ret = H5Adelete(gid, MISC25C_ATTRNAME); - CHECK(ret, FAIL, "H5Adelete"); - - /* Close the dataset group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc25c() */ - -/**************************************************************** -** -** test_misc26(): Regression test: ensure that copying filter -** pipelines works properly. -** -****************************************************************/ -static void -test_misc26(void) -{ - hid_t fid; /* File ID */ - hid_t sid; /* Dataspace ID */ - hid_t did; /* Dataset ID */ - hid_t dcpl1, dcpl2, dcpl3; /* Property List IDs */ - hsize_t dims[] = {1}; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Copying filter pipelines\n")); - - /* Create the property list. 
It needs chunking so we can add filters */ - dcpl1 = H5Pcreate(H5P_DATASET_CREATE); - CHECK_I(dcpl1, "H5Pcreate"); - ret = H5Pset_chunk(dcpl1, 1, dims); - CHECK_I(ret, "H5Pset_chunk"); - - /* Add a filter with a data value to the property list */ - ret = H5Pset_deflate(dcpl1, 1); - CHECK_I(ret, "H5Pset_deflate"); - - /* Copy the property list */ - dcpl2 = H5Pcopy(dcpl1); - CHECK_I(dcpl2, "H5Pcopy"); - - /* Add a filter with no data values to the copy */ - ret = H5Pset_shuffle(dcpl2); - CHECK_I(ret, "H5Pset_shuffle"); - - /* Copy the copy */ - dcpl3 = H5Pcopy(dcpl2); - CHECK_I(dcpl3, "H5Pcopy"); - - /* Add another filter */ - ret = H5Pset_deflate(dcpl3, 2); - CHECK_I(ret, "H5Pset_deflate"); - - /* Create a new file and datasets within that file that use these - * property lists - */ - fid = H5Fcreate(MISC26_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - sid = H5Screate_simple(1, dims, dims); - CHECK(sid, FAIL, "H5Screate_simple"); - - did = H5Dcreate2(fid, "dataset1", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl1, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - ret = H5Dclose(did); - CHECK_I(ret, "H5Dclose"); - - did = H5Dcreate2(fid, "dataset2", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl2, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - ret = H5Dclose(did); - CHECK_I(ret, "H5Dclose"); - - did = H5Dcreate2(fid, "dataset3", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl3, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - ret = H5Dclose(did); - CHECK_I(ret, "H5Dclose"); - - /* Close the dataspace and file */ - ret = H5Sclose(sid); - CHECK_I(ret, "H5Sclose"); - ret = H5Fclose(fid); - CHECK_I(ret, "H5Fclose"); - - /* Close the property lists. */ - ret = H5Pclose(dcpl1); - CHECK_I(ret, "H5Pclose"); - ret = H5Pclose(dcpl2); - CHECK_I(ret, "H5Pclose"); - ret = H5Pclose(dcpl3); - CHECK_I(ret, "H5Pclose"); -} - -/**************************************************************** -** -** test_misc27(): Ensure that objects with incorrect # of object -** header messages are handled appropriately. 
-** -** (Note that this test file is generated by the "gen_bad_ohdr.c" code) -** -****************************************************************/ -#if 0 -static void -test_misc27(void) -{ - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - const char *testfile = H5_get_srcdir_filename(MISC27_FILE); /* Corrected test file name */ - bool driver_is_default_compatible; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Corrupt object header handling\n")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - /* Open the file */ - fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - -#ifdef H5_STRICT_FORMAT_CHECKS - /* Open group with incorrect # of object header messages (should fail) */ - H5E_BEGIN_TRY - { - gid = H5Gopen2(fid, MISC27_GROUP, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(gid, FAIL, "H5Gopen2"); -#else /* H5_STRICT_FORMAT_CHECKS */ - /* Open group with incorrect # of object header messages */ - gid = H5Gopen2(fid, MISC27_GROUP, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gopen2"); - - /* Close group */ - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); -#endif /* H5_STRICT_FORMAT_CHECKS */ - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc27() */ -#endif - -/**************************************************************** -** -** test_misc28(): Ensure that the dataset chunk cache will hold -** the correct number of chunks in cache without -** evicting them. -** -****************************************************************/ -static void -test_misc28(void) -{ - hid_t fid; /* File ID */ - hid_t sidf; /* File Dataspace ID */ - hid_t sidm; /* Memory Dataspace ID */ - hid_t did; /* Dataset ID */ - hid_t dcpl, fapl; /* Property List IDs */ - hsize_t dims[] = {MISC28_SIZE, MISC28_SIZE}; - hsize_t mdims[] = {MISC28_SIZE}; - hsize_t cdims[] = {1, 1}; - hsize_t start[] = {0, 0}; - hsize_t count[] = {MISC28_SIZE, 1}; -#if 0 - size_t nbytes_used; - int nused; -#endif - char buf[MISC28_SIZE]; - int i; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Dataset chunk cache\n")); - - /* Create the fapl and set the cache size. Set nelmts to larger than the - * file size so we can be guaranteed that no chunks will be evicted due to - * a hash collision. Set nbytes to fit exactly 1 column of chunks (10 - * bytes). 
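For reference, the chunk cache tuning that test_misc28 performs goes through H5Pset_cache(), which takes four values whose roles are easy to confuse. The sketch below labels them; it is not part of the patch, the numeric values are illustrative, error checking is omitted, and the note about the second argument reflects its documented status as a legacy, unused parameter in recent releases.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        H5Pset_cache(fapl,
                     0,       /* mdc_nelmts: legacy metadata cache setting, no longer used       */
                     10007,   /* rdcc_nslots: number of hash slots in the raw data chunk cache   */
                     1048576, /* rdcc_nbytes: total raw data chunk cache size, in bytes          */
                     0.75);   /* rdcc_w0: chunk preemption policy weight                         */

        H5Pclose(fapl);
        return 0;
    }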
*/ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - ret = H5Pset_cache(fapl, MISC28_NSLOTS, MISC28_NSLOTS, MISC28_SIZE, 0.75); - CHECK(ret, FAIL, "H5Pset_cache"); - - /* Create the dcpl and set the chunk size */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, 2, cdims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create a new file and datasets within that file that use these - * property lists - */ - fid = H5Fcreate(MISC28_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - sidf = H5Screate_simple(2, dims, NULL); - CHECK(sidf, FAIL, "H5Screate_simple"); - - did = H5Dcreate2(fid, "dataset", H5T_NATIVE_CHAR, sidf, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); -#if 0 - /* Verify that the chunk cache is empty */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); - VERIFY(nused, 0, "H5D__current_cache_size_test"); -#endif - /* Initialize write buffer */ - for (i = 0; i < MISC28_SIZE; i++) - buf[i] = (char)i; - - /* Create memory dataspace and selection in file dataspace */ - sidm = H5Screate_simple(1, mdims, NULL); - CHECK(sidm, FAIL, "H5Screate_simple"); - - ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write hypserslab */ - ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); - CHECK(ret, FAIL, "H5Dwrite"); -#if 0 - /* Verify that all 10 chunks written have been cached */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); -#endif - /* Initialize write buffer */ - for (i = 0; i < MISC28_SIZE; i++) - buf[i] = (char)(MISC28_SIZE - 1 - i); - - /* Select new hyperslab */ - start[1] = 1; - ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write hyperslab */ - ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); - CHECK(ret, FAIL, "H5Dwrite"); -#if 0 - /* Verify that the size of the cache remains at 10 */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); -#endif - /* Close dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Re open dataset */ - did = H5Dopen2(fid, "dataset", H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); -#if 0 - /* Verify that the chunk cache is empty */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); - VERIFY(nused, 0, "H5D__current_cache_size_test"); -#endif - /* Select hyperslabe for reading */ - start[1] = 0; - ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read hypserslab */ - ret = H5Dread(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read */ - for (i = 0; i < MISC28_SIZE; i++) - VERIFY(buf[i], i, "H5Dread"); -#if 0 - /* Verify that all 10 chunks read have 
been cached */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); -#endif - /* Select new hyperslab */ - start[1] = 1; - ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read hyperslab */ - ret = H5Dread(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the data read */ - for (i = 0; i < MISC28_SIZE; i++) - VERIFY(buf[i], MISC28_SIZE - 1 - i, "H5Dread"); -#if 0 - /* Verify that the size of the cache remains at 10 */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); -#endif - /* Close dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the dataspaces and file */ - ret = H5Sclose(sidf); - CHECK_I(ret, "H5Sclose"); - ret = H5Sclose(sidm); - CHECK_I(ret, "H5Sclose"); - ret = H5Fclose(fid); - CHECK_I(ret, "H5Fclose"); - - /* Close the property lists. */ - ret = H5Pclose(dcpl); - CHECK_I(ret, "H5Pclose"); - ret = H5Pclose(fapl); - CHECK_I(ret, "H5Pclose"); -} /* end test_misc28() */ - -/**************************************************************** -** -** test_misc29(): Ensure that speculative metadata reads don't -** get raw data into the metadata accumulator. -** -****************************************************************/ -#if 0 -static void -test_misc29(void) -{ - bool driver_is_default_compatible; - hid_t fid; /* File ID */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Speculative metadata reads\n")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - /* Make a copy of the data file from svn. */ - ret = h5_make_local_copy(MISC29_ORIG_FILE, MISC29_COPY_FILE); - CHECK(ret, -1, "h5_make_local_copy"); - - /* Open the copied file */ - fid = H5Fopen(MISC29_COPY_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Delete the last dataset */ - ret = H5Ldelete(fid, MISC29_DSETNAME, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_misc29() */ -#endif - -#if 0 -static int -test_misc30_get_info_cb(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *info, - void H5_ATTR_UNUSED *op_data) -{ - H5O_info2_t object_info; - - return H5Oget_info_by_name3(loc_id, name, &object_info, H5O_INFO_BASIC, H5P_DEFAULT); -} - -static int -test_misc30_get_info(hid_t loc_id) -{ - return H5Literate2(loc_id, H5_INDEX_NAME, H5_ITER_INC, NULL, test_misc30_get_info_cb, NULL); -} -#endif - -/**************************************************************** -** -** test_misc30(): Exercise local heap code that loads prefix -** separately from data block, causing the free -** block information to get lost. 
-** -****************************************************************/ -#if 0 -static void -test_misc30(void) -{ - hsize_t file_size[] = {0, 0}; /* Sizes of file created */ - unsigned get_info; /* Whether to perform the get info call */ - - /* Output message about test being performed */ - MESSAGE(5, ("Local heap dropping free block info\n")); - - for (get_info = false; get_info <= true; get_info++) { - hid_t fid; /* File ID */ - hid_t gid; /* Group ID */ - int i; /* Local index counter */ - herr_t ret; /* Generic return value */ - - fid = H5Fcreate(MISC30_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - gid = H5Gcreate2(fid, "/g0", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - for (i = 0; i < 20; i++) { - char gname[32]; - - fid = H5Fopen(MISC30_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - if (get_info) { - ret = test_misc30_get_info(fid); - CHECK(ret, FAIL, "test_misc30_get_info"); - } - - snprintf(gname, sizeof(gname), "/g0/group%d", i); - gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, FAIL, "H5Gcreate2"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - - fid = H5Fopen(MISC30_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - ret = H5Fget_filesize(fid, &file_size[get_info]); - CHECK(fid, FAIL, "H5Fget_filesize"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - } - - VERIFY(file_size[0], file_size[1], "test_misc30"); -} /* end test_misc30() */ -#endif - -/**************************************************************** -** -** test_misc31(): Test reentering library through deprecated -* routines that register an id after calling -* H5close(). 
-** -****************************************************************/ -#if 0 -static void -test_misc31(void) -{ -#ifndef H5_NO_DEPRECATED_SYMBOLS - hid_t file_id; /* File id */ - hid_t space_id; /* Dataspace id */ - hid_t dset_id; /* Dataset id */ - hid_t attr_id; /* Attribute id */ - hid_t group_id; /* Group id */ - hid_t dtype_id; /* Datatype id */ - herr_t ret; /* Generic return value */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ - - /* Output message about test being performed */ - MESSAGE(5, ("Deprecated routines initialize after H5close()\n")); - -#ifndef H5_NO_DEPRECATED_SYMBOLS - file_id = H5Fcreate(MISC31_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Test dataset package */ - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - dset_id = H5Dcreate1(file_id, MISC31_DSETNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate1"); - ret = H5close(); - CHECK(ret, FAIL, "H5close"); - file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - dset_id = H5Dopen1(file_id, MISC31_DSETNAME); - CHECK(dset_id, FAIL, "H5Dopen1"); - - /* Test attribute package */ - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - attr_id = H5Acreate1(dset_id, MISC31_ATTRNAME1, H5T_NATIVE_INT, space_id, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate1"); - ret = H5close(); - CHECK(ret, FAIL, "H5close"); - file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - dset_id = H5Dopen1(file_id, MISC31_DSETNAME); - CHECK(dset_id, FAIL, "H5Dopen1"); - space_id = H5Screate(H5S_SCALAR); - CHECK(space_id, FAIL, "H5Screate"); - attr_id = H5Acreate1(dset_id, MISC31_ATTRNAME2, H5T_NATIVE_INT, space_id, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate1"); - - /* Test group package */ - group_id = H5Gcreate1(file_id, MISC31_GROUPNAME, 0); - CHECK(group_id, FAIL, "H5Gcreate1"); - ret = H5close(); - CHECK(ret, FAIL, "H5close"); - file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - group_id = H5Gopen1(file_id, MISC31_GROUPNAME); - CHECK(group_id, FAIL, "H5Gopen1"); - - /* Test property list package */ - ret = H5Pregister1(H5P_OBJECT_CREATE, MISC31_PROPNAME, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK(ret, FAIL, "H5Pregister1"); - ret = H5close(); - CHECK(ret, FAIL, "H5close"); - ret = H5Pregister1(H5P_OBJECT_CREATE, MISC31_PROPNAME, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); - CHECK(ret, FAIL, "H5Pregister1"); - ret = H5close(); - CHECK(ret, FAIL, "H5close"); - - /* Test datatype package */ - file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - dtype_id = H5Tcopy(H5T_NATIVE_INT); - CHECK(dtype_id, FAIL, "H5Tcopy"); - ret = H5Tcommit1(file_id, MISC31_DTYPENAME, dtype_id); - CHECK(ret, FAIL, "H5Tcommit1"); - ret = H5close(); - CHECK(ret, FAIL, "H5close"); - file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - dtype_id = H5Topen1(file_id, MISC31_DTYPENAME); - CHECK(ret, FAIL, "H5Topen1"); - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Tclose(dtype_id); - CHECK(ret, FAIL, "H5Tclose"); - -#else /* H5_NO_DEPRECATED_SYMBOLS */ - /* Output message about test being skipped */ - MESSAGE(5, (" ...Skipped")); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -} /* end test_misc31() */ -#endif - -/**************************************************************** - * - * test_misc32(): Simple test of filter 
memory allocation - * functions. - * - ***************************************************************/ -static void -test_misc32(void) -{ - void *buffer; - void *resized; - size_t size; - - /* Output message about test being performed */ - MESSAGE(5, ("Edge case test of filter memory allocation functions\n")); - - /* Test that the filter memory allocation functions behave correctly - * at edge cases. - */ - - /* FREE */ - - /* Test freeing a NULL pointer. - * No real confirmation check here, but Valgrind will confirm no - * shenanigans. - */ - buffer = NULL; - H5free_memory(buffer); - - /* ALLOCATE */ - - /* Size zero returns NULL. - * Also checks that a size of zero and setting the buffer clear flag - * to true can be used together. - * - * Note that we have asserts in the code, so only check when NDEBUG - * is defined. - */ -#ifdef NDEBUG - buffer = H5allocate_memory(0, false); - CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/ - buffer = H5allocate_memory(0, true); - CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/ -#endif /* NDEBUG */ - - /* RESIZE */ - - /* Size zero returns NULL. Valgrind will confirm buffer is freed. */ - size = 1024; - buffer = H5allocate_memory(size, true); - resized = H5resize_memory(buffer, 0); - CHECK_PTR_NULL(resized, "H5resize_memory"); - - /* NULL input pointer returns new buffer */ - resized = H5resize_memory(NULL, 1024); - CHECK_PTR(resized, "H5resize_memory"); - H5free_memory(resized); - - /* NULL input pointer and size zero returns NULL */ -#ifdef NDEBUG - resized = H5resize_memory(NULL, 0); - CHECK_PTR_NULL(resized, "H5resize_memory"); /*BAD*/ -#endif /* NDEBUG */ - -} /* end test_misc32() */ - -/**************************************************************** -** -** test_misc33(): Test for H5FFV-10216 -** --verify that H5HL_offset_into() returns error if the -** input parameter "offset" exceeds heap data block size. -** --case (1), (2), (3) are scenarios that will traverse to the -** the 3 locations in the file having bad offset values to -** the heap. 
(See description in gen_bad_offset.c) -** -****************************************************************/ -#if 0 -static void -test_misc33(void) -{ - hid_t fid = -1; /* File ID */ - const char *testfile = H5_get_srcdir_filename(MISC33_FILE); /* Corrected test file name */ - H5O_info2_t oinfo; /* Structure for object metadata information */ - bool driver_is_default_compatible; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing that bad offset into the heap returns error")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - /* Open the test file */ - fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Case (1) */ - H5E_BEGIN_TRY - { - ret = H5Oget_info_by_name3(fid, "/soft_two", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Oget_info_by_name3"); - - /* Case (2) */ - H5E_BEGIN_TRY - { - ret = H5Oget_info_by_name3(fid, "/dsetA", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Oget_info_by_name3"); - - /* Case (3) */ - H5E_BEGIN_TRY - { - ret = H5Oget_info_by_name3(fid, "/soft_one", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Oget_info_by_name3"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(fid, FAIL, "H5Fclose"); - -} /* end test_misc33() */ -#endif - -/**************************************************************** -** -** test_misc34(): Ensure zero-size memory allocations work -** -****************************************************************/ -#if 0 -static void -test_misc34(void) -{ - void *mem = NULL; /* allocated buffer */ - char *dup = NULL; /* 'duplicated' string */ - size_t sz = 0; /* buffer size */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing O and NULL behavior in H5MM API calls")); - - /* H5MM_xfree(): Ensure that passing NULL is allowed and returns NULL */ - mem = H5MM_xfree(mem); - CHECK_PTR_NULL(mem, "H5MM_xfree"); - - /* H5MM_realloc(): Check behavior: - * - * H5MM_realloc(NULL, size) <==> H5MM_malloc(size) - * H5MM_realloc(ptr, 0) <==> H5MM_xfree(ptr) - * H5MM_realloc(NULL, 0) <==> NULL - */ - mem = H5MM_xfree(mem); - - sz = 1024; - mem = H5MM_realloc(mem, sz); - CHECK_PTR(mem, "H5MM_realloc (case 1)"); - /* Don't free mem here! 
*/ - - sz = 0; - mem = H5MM_realloc(mem, sz); - CHECK_PTR_NULL(mem, "H5MM_realloc (case 2)"); - mem = H5MM_xfree(mem); - - mem = H5MM_realloc(mem, sz); - CHECK_PTR_NULL(mem, "H5MM_realloc (case 3)"); - mem = H5MM_xfree(mem); - - /* H5MM_xstrdup(): Ensure NULL returns NULL */ - dup = H5MM_xstrdup((const char *)mem); - CHECK_PTR_NULL(dup, "H5MM_xstrdup"); - dup = (char *)H5MM_xfree((void *)dup); - -} /* end test_misc34() */ - -/**************************************************************** -** -** test_misc35(): Check operation of free-list routines -** -****************************************************************/ -static void -test_misc35(void) -{ - hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ - hsize_t dims[] = {MISC35_SPACE_DIM1, MISC35_SPACE_DIM2, MISC35_SPACE_DIM3}; /* Dataspace dims */ - hsize_t coord[MISC35_NPOINTS][MISC35_SPACE_RANK] = /* Coordinates for point selection */ - {{0, 10, 5}, {1, 2, 7}, {2, 4, 9}, {0, 6, 11}, {1, 8, 13}, - {2, 12, 0}, {0, 14, 2}, {1, 0, 4}, {2, 1, 6}, {0, 3, 8}}; - size_t reg_size_start; /* Initial amount of regular memory allocated */ - size_t arr_size_start; /* Initial amount of array memory allocated */ - size_t blk_size_start; /* Initial amount of block memory allocated */ - size_t fac_size_start; /* Initial amount of factory memory allocated */ - size_t reg_size_final; /* Final amount of regular memory allocated */ - size_t arr_size_final; /* Final amount of array memory allocated */ - size_t blk_size_final; /* Final amount of block memory allocated */ - size_t fac_size_final; /* Final amount of factory memory allocated */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Free-list API calls")); - - /* Create dataspace */ - /* (Allocates array free-list nodes) */ - sid = H5Screate_simple(MISC35_SPACE_RANK, dims, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Select sequence of ten points */ - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)MISC35_NPOINTS, (const hsize_t *)coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Retrieve initial free list values */ - ret = H5get_free_list_sizes(®_size_start, &arr_size_start, &blk_size_start, &fac_size_start); - CHECK(ret, FAIL, "H5get_free_list_sizes"); - -#if !defined H5_NO_FREE_LISTS && !defined H5_USING_MEMCHECKER - /* All the free list values should be >0 */ - CHECK(reg_size_start, 0, "H5get_free_list_sizes"); - CHECK(arr_size_start, 0, "H5get_free_list_sizes"); - CHECK(blk_size_start, 0, "H5get_free_list_sizes"); - CHECK(fac_size_start, 0, "H5get_free_list_sizes"); -#else - /* All the values should be == 0 */ - VERIFY(reg_size_start, 0, "H5get_free_list_sizes"); - VERIFY(arr_size_start, 0, "H5get_free_list_sizes"); - VERIFY(blk_size_start, 0, "H5get_free_list_sizes"); - VERIFY(fac_size_start, 0, "H5get_free_list_sizes"); -#endif - - /* Garbage collect the free lists */ - ret = H5garbage_collect(); - CHECK(ret, FAIL, "H5garbage_collect"); - - /* Retrieve free list values again */ - ret = H5get_free_list_sizes(®_size_final, &arr_size_final, &blk_size_final, &fac_size_final); - CHECK(ret, FAIL, "H5get_free_list_sizes"); - - /* All the free list values should be <= previous values */ - if (reg_size_final > reg_size_start) - ERROR("reg_size_final > reg_size_start"); - if (arr_size_final > arr_size_start) - ERROR("arr_size_final > arr_size_start"); - if (blk_size_final > blk_size_start) - ERROR("blk_size_final > blk_size_start"); - if 
(fac_size_final > fac_size_start) - ERROR("fac_size_final > fac_size_start"); - -} /* end test_misc35() */ -#endif - -/* Context to pass to 'atclose' callbacks */ -static int test_misc36_context; - -/* 'atclose' callbacks for test_misc36 */ -static void -test_misc36_cb1(void *_ctx) -{ - int *ctx = (int *)_ctx; /* Set up context pointer */ - bool is_terminating; /* Flag indicating the library is terminating */ - herr_t ret; /* Return value */ - - /* Check whether the library thinks it's terminating */ - is_terminating = false; - ret = H5is_library_terminating(&is_terminating); - CHECK(ret, FAIL, "H5is_library_terminating"); - VERIFY(is_terminating, true, "H5is_library_terminating"); - - /* Verify correct ordering for 'atclose' callbacks */ - if (0 != *ctx) - HDabort(); - - /* Update context value */ - *ctx = 1; -} - -static void -test_misc36_cb2(void *_ctx) -{ - int *ctx = (int *)_ctx; /* Set up context pointer */ - bool is_terminating; /* Flag indicating the library is terminating */ - herr_t ret; /* Return value */ - - /* Check whether the library thinks it's terminating */ - is_terminating = false; - ret = H5is_library_terminating(&is_terminating); - CHECK(ret, FAIL, "H5is_library_terminating"); - VERIFY(is_terminating, true, "H5is_library_terminating"); - - /* Verify correct ordering for 'atclose' callbacks */ - if (1 != *ctx) - HDabort(); - - /* Update context value */ - *ctx = 2; -} - -/**************************************************************** -** -** test_misc36(): Exercise H5atclose and H5is_library_terminating -** -****************************************************************/ -static void -test_misc36(void) -{ - bool is_terminating; /* Flag indicating the library is terminating */ - herr_t ret; /* Return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("H5atclose and H5is_library_terminating API calls")); - - /* Check whether the library thinks it's terminating */ - is_terminating = true; - ret = H5is_library_terminating(&is_terminating); - CHECK(ret, FAIL, "H5is_library_terminating"); - VERIFY(is_terminating, false, "H5is_library_terminating"); - - /* Shut the library down */ - test_misc36_context = 0; - H5close(); - - /* Check whether the library thinks it's terminating */ - is_terminating = true; - ret = H5is_library_terminating(&is_terminating); - CHECK(ret, FAIL, "H5is_library_terminating"); - VERIFY(is_terminating, false, "H5is_library_terminating"); - - /* Check the close context was not changed */ - VERIFY(test_misc36_context, 0, "H5atclose"); - - /* Restart the library */ - H5open(); - - /* Check whether the library thinks it's terminating */ - is_terminating = true; - ret = H5is_library_terminating(&is_terminating); - CHECK(ret, FAIL, "H5is_library_terminating"); - VERIFY(is_terminating, false, "H5is_library_terminating"); - - /* Register the 'atclose' callbacks */ - /* (Note that these will be called in reverse order, which is checked) */ - ret = H5atclose(&test_misc36_cb2, &test_misc36_context); - CHECK(ret, FAIL, "H5atclose"); - ret = H5atclose(&test_misc36_cb1, &test_misc36_context); - CHECK(ret, FAIL, "H5atclose"); - - /* Shut the library down */ - test_misc36_context = 0; - H5close(); - - /* Check the close context was changed correctly */ - VERIFY(test_misc36_context, 2, "H5atclose"); - - /* Restart the library */ - H5open(); - - /* Close the library again */ - test_misc36_context = 0; - H5close(); - - /* Check the close context was not changed */ - VERIFY(test_misc36_context, 0, "H5atclose"); -} /* end test_misc36() */ - -#if 0 
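For readers following the H5atclose/H5is_library_terminating pattern that test_misc36() above exercises, the same behavior can be reduced to a minimal standalone sketch outside the testhdf5 framework. The calls H5atclose(), H5is_library_terminating(), and H5close() are the real public API used by the test; the program structure, the on_close/shutdown_count names, and the printf output are illustrative assumptions only.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>
    #include "hdf5.h"

    /* Context updated by the 'atclose' callback (illustrative name) */
    static int shutdown_count = 0;

    /* Runs while the library is shutting down (during H5close or at exit) */
    static void
    on_close(void *ctx)
    {
        bool terminating = false;

        /* Inside an 'atclose' callback the library reports that it is terminating */
        if (H5is_library_terminating(&terminating) >= 0 && terminating)
            (*(int *)ctx)++;
    }

    int
    main(void)
    {
        bool terminating = true;

        /* Library is running normally here, so the flag comes back false */
        if (H5is_library_terminating(&terminating) < 0 || terminating)
            return EXIT_FAILURE;

        /* Register the callback; it fires during the next H5close() and is
         * then unregistered, so a later H5close() would not call it again */
        if (H5atclose(on_close, &shutdown_count) < 0)
            return EXIT_FAILURE;

        if (H5close() < 0)
            return EXIT_FAILURE;

        printf("atclose callback ran %d time(s)\n", shutdown_count); /* expect 1 */
        return EXIT_SUCCESS;
    }

As in the test, callbacks registered last run first, and a context pointer passed to H5atclose() is handed back to the callback unchanged, which is what lets test_misc36() verify both the ordering and the single-shot registration.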
-/**************************************************************** -** -** test_misc37(): -** Test for seg fault issue when closing the provided test file -** which has an illegal file size in its cache image. -** See HDFFV-11052/CVE-2020-10812 for details. -** -****************************************************************/ -static void -test_misc37(void) -{ - const char *testfile = H5_get_srcdir_filename(CVE_2020_10812_FILENAME); - bool driver_is_default_compatible; - hid_t fid; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Fix for HDFFV-11052/CVE-2020-10812")); - - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - - fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* This should fail due to the illegal file size. - It should fail gracefully and not seg fault */ - H5E_BEGIN_TRY - { - ret = H5Fclose(fid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fclose"); - -} /* end test_misc37() */ -#endif - -/**************************************************************** -** -** test_misc(): Main misc. test routine. -** -****************************************************************/ -void -test_misc(void) -{ - bool default_driver = h5_using_default_driver(NULL); - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Miscellaneous Routines\n")); - - test_misc1(); /* Test unlinking a dataset & immediately re-using name */ - test_misc2(); /* Test storing a VL-derived datatype in two different files */ - test_misc3(); /* Test reading from chunked dataset with non-zero fill value */ - test_misc4(); /* Test retrieving the fileno for various objects with H5Oget_info() */ - test_misc5(); /* Test several level deep nested compound & VL datatypes */ - test_misc6(); /* Test object header continuation code */ -#if 0 - test_misc7(); /* Test for sensible datatypes stored on disk */ - test_misc8(); /* Test storage sizes of various types of dataset storage */ -#endif - test_misc9(); /* Test for opening (not creating) core files */ -#if 0 - test_misc10(); /* Test for using dataset creation property lists from old files */ -#endif - - if (default_driver) { - test_misc11(); /* Test for all properties of a file creation property list being stored */ - } - - test_misc12(); /* Test VL-strings in chunked datasets operating correctly */ -#if 0 - if (default_driver) { - test_misc13(); /* Test that a user block can be insert in front of file contents */ - } -#endif - test_misc14(); /* Test that deleted dataset's data is removed from sieve buffer correctly */ - test_misc15(); /* Test that checking a file's access property list more than once works */ - test_misc16(); /* Test array of fixed-length string */ - test_misc17(); /* Test array of ASCII character */ - test_misc18(); /* Test new object header information in H5O_info2_t struct */ - test_misc19(); /* Test incrementing & decrementing ref count on IDs */ -#if 0 - test_misc20(); /* Test problems with truncated dimensions in version 2 of storage layout message */ -#endif -#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) - test_misc21(); /* Test that "late" allocation time is treated the same as "incremental", for chunked - datasets w/a filters */ - test_misc22(); /* check szip bits per pixel */ -#endif /* H5_HAVE_FILTER_SZIP */ - test_misc23(); /* Test intermediate group 
creation */ - test_misc24(); /* Test inappropriate API opens of objects */ - test_misc25a(); /* Exercise null object header message merge bug */ -#if 0 - test_misc25b(); /* Exercise null object header message merge bug on existing file */ -#endif - test_misc25c(); /* Exercise another null object header message merge bug */ - test_misc26(); /* Test closing property lists with long filter pipelines */ -#if 0 - test_misc27(); /* Test opening file with object that has bad # of object header messages */ -#endif - test_misc28(); /* Test that chunks are cached appropriately */ -#if 0 - test_misc29(); /* Test that speculative metadata reads are handled correctly */ - test_misc30(); /* Exercise local heap loading bug where free lists were getting dropped */ - - if (default_driver) { - test_misc31(); /* Test Reentering library through deprecated routines after H5close() */ - } -#endif - test_misc32(); /* Test filter memory allocation functions */ -#if 0 - test_misc33(); /* Test to verify that H5HL_offset_into() returns error if offset exceeds heap block */ - test_misc34(); /* Test behavior of 0 and NULL in H5MM API calls */ - test_misc35(); /* Test behavior of free-list & allocation statistics API calls */ -#endif - test_misc36(); /* Exercise H5atclose and H5is_library_terminating */ -#if 0 - test_misc37(); /* Test for seg fault failure at file close */ -#endif -} /* test_misc() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_misc - * - * Purpose: Cleanup temporary test files - * - * Return: none - * - *------------------------------------------------------------------------- - */ -void -cleanup_misc(void) -{ - H5Fdelete(MISC1_FILE, H5P_DEFAULT); - H5Fdelete(MISC2_FILE_1, H5P_DEFAULT); - H5Fdelete(MISC2_FILE_2, H5P_DEFAULT); - H5Fdelete(MISC3_FILE, H5P_DEFAULT); - H5Fdelete(MISC4_FILE_1, H5P_DEFAULT); - H5Fdelete(MISC4_FILE_2, H5P_DEFAULT); - H5Fdelete(MISC5_FILE, H5P_DEFAULT); - H5Fdelete(MISC6_FILE, H5P_DEFAULT); - H5Fdelete(MISC7_FILE, H5P_DEFAULT); - H5Fdelete(MISC8_FILE, H5P_DEFAULT); - H5Fdelete(MISC9_FILE, H5P_DEFAULT); - H5Fdelete(MISC10_FILE_NEW, H5P_DEFAULT); - H5Fdelete(MISC11_FILE, H5P_DEFAULT); - H5Fdelete(MISC12_FILE, H5P_DEFAULT); - H5Fdelete(MISC13_FILE_1, H5P_DEFAULT); - H5Fdelete(MISC13_FILE_2, H5P_DEFAULT); - H5Fdelete(MISC14_FILE, H5P_DEFAULT); - H5Fdelete(MISC15_FILE, H5P_DEFAULT); - H5Fdelete(MISC16_FILE, H5P_DEFAULT); - H5Fdelete(MISC17_FILE, H5P_DEFAULT); - H5Fdelete(MISC18_FILE, H5P_DEFAULT); - H5Fdelete(MISC19_FILE, H5P_DEFAULT); - H5Fdelete(MISC20_FILE, H5P_DEFAULT); -#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS) - H5Fdelete(MISC21_FILE, H5P_DEFAULT); - H5Fdelete(MISC22_FILE, H5P_DEFAULT); -#endif /* H5_HAVE_FILTER_SZIP */ - H5Fdelete(MISC23_FILE, H5P_DEFAULT); - H5Fdelete(MISC24_FILE, H5P_DEFAULT); - H5Fdelete(MISC25A_FILE, H5P_DEFAULT); - H5Fdelete(MISC25C_FILE, H5P_DEFAULT); - H5Fdelete(MISC26_FILE, H5P_DEFAULT); - H5Fdelete(MISC28_FILE, H5P_DEFAULT); - H5Fdelete(MISC29_COPY_FILE, H5P_DEFAULT); - H5Fdelete(MISC30_FILE, H5P_DEFAULT); -#ifndef H5_NO_DEPRECATED_SYMBOLS - H5Fdelete(MISC31_FILE, H5P_DEFAULT); -#endif /* H5_NO_DEPRECATED_SYMBOLS */ -} /* end cleanup_misc() */ diff --git a/test/API/trefer.c b/test/API/trefer.c deleted file mode 100644 index e8abe16779b..00000000000 --- a/test/API/trefer.c +++ /dev/null @@ -1,3636 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. 
* - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: trefer - * - * Test the Reference functionality - * - *************************************************************/ - -#include "testhdf5.h" - -#define FILE_REF_PARAM "trefer_param.h5" -#define FILE_REF_OBJ "trefer_obj.h5" -#define FILE_REF_VL_OBJ "trefer_vl_obj.h5" -#define FILE_REF_CMPND_OBJ "trefer_cmpnd_obj.h5" -#define FILE_REF_REG "trefer_reg.h5" -#define FILE_REF_REG_1D "trefer_reg_1d.h5" -#define FILE_REF_OBJ_DEL "trefer_obj_del.h5" -#define FILE_REF_GRP "trefer_grp.h5" -#define FILE_REF_ATTR "trefer_attr.h5" -#define FILE_REF_EXT1 "trefer_ext1.h5" -#define FILE_REF_EXT2 "trefer_ext2.h5" -#define FILE_REF_COMPAT "trefer_compat.h5" - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -/* 2-D dataset with fixed dimensions */ -#define SPACE2_RANK 2 -#define SPACE2_DIM1 10 -#define SPACE2_DIM2 10 - -/* Larger 1-D dataset with fixed dimensions */ -#define SPACE3_RANK 1 -#define SPACE3_DIM1 100 - -/* Element selection information */ -#define POINT1_NPOINTS 10 - -/* Compound datatype */ -typedef struct s1_t { - unsigned int a; - unsigned int b; - float c; -} s1_t; - -/* Compound datatype with reference */ -typedef struct s2_t { - H5R_ref_t ref0; /* reference */ - H5R_ref_t ref1; /* reference */ - H5R_ref_t ref2; /* reference */ - H5R_ref_t ref3; /* reference */ - unsigned int dim_idx; /* dimension index of the dataset */ -} s2_t; - -#define GROUPNAME "/group" -#define GROUPNAME2 "group2" -#define GROUPNAME3 "group3" -#define DSETNAME "/dset" -#define DSETNAME2 "dset2" -#define NAME_SIZE 16 - -#define MAX_ITER_CREATE 1000 -#define MAX_ITER_WRITE MAX_ITER_CREATE -#define MAX_ITER_READ MAX_ITER_CREATE - -/**************************************************************** -** -** test_reference_params(): Test basic H5R (reference) parameters -** for correct processing -** -****************************************************************/ -static void -test_reference_params(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t group; /* Group ID */ - hid_t attr; /* Attribute ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hid_t aapl_id; /* Attribute access property list */ - hid_t dapl_id; /* Dataset access property list */ - hsize_t dims1[] = {SPACE1_DIM1}; - H5R_ref_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temp. 
buffer read from disk */ - unsigned *obuf; - H5R_type_t type; /* Reference type */ - unsigned int i; /* Counters */ -#if 0 - const char *write_comment = "Foo!"; /* Comments for group */ -#endif - hid_t ret_id; /* Generic hid_t return value */ - ssize_t name_size; /* Size of reference name */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Reference Parameters\n")); - - /* Allocate write & read buffers */ - wbuf = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - rbuf = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - tbuf = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - obuf = calloc(sizeof(unsigned), SPACE1_DIM1); - - for (i = 0; i < SPACE1_DIM1; i++) - obuf[i] = i * 3; - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_PARAM, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create attribute access property list */ - aapl_id = H5Pcreate(H5P_ATTRIBUTE_ACCESS); - CHECK(aapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); -#if 0 - /* Set group's comment */ - ret = H5Oset_comment(group, write_comment); - CHECK(ret, FAIL, "H5Oset_comment"); -#endif - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(dataset, "Attr", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, obuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a datatype to refer to */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, 
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, H5I_INVALID_HID, "H5Dcreate2"); - - /* Test parameters to H5Rcreate_object */ - H5E_BEGIN_TRY - { - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_object ref"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_object(H5I_INVALID_HID, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_object loc_id"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_object(fid1, NULL, H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_object name"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_object(fid1, "", H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_object null name"); - - /* Test parameters to H5Rcreate_region */ - H5E_BEGIN_TRY - { - ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_region ref"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_region(H5I_INVALID_HID, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_region loc_id"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_region(fid1, NULL, sid1, H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_region name"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_region(fid1, "/Group1/Dataset1", H5I_INVALID_HID, H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_region dataspace"); - - /* Test parameters to H5Rcreate_attr */ - H5E_BEGIN_TRY - { - ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr", H5P_DEFAULT, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_attr ref"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_attr(H5I_INVALID_HID, "/Group1/Dataset2", "Attr", H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_attr loc_id"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_attr(fid1, NULL, "Attr", H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_attr name"); - H5E_BEGIN_TRY - { - ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", NULL, H5P_DEFAULT, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcreate_attr attr_name"); - - /* Test parameters to H5Rdestroy */ - H5E_BEGIN_TRY - { - ret = H5Rdestroy(NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rdestroy"); - - /* Test parameters to H5Rget_type */ - H5E_BEGIN_TRY - { - type = H5Rget_type(NULL); - } - H5E_END_TRY - VERIFY(type, H5R_BADTYPE, "H5Rget_type ref"); - - /* Test parameters to H5Requal */ - H5E_BEGIN_TRY - { - ret = H5Requal(NULL, &rbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Requal ref1"); - H5E_BEGIN_TRY - { - ret = H5Requal(&rbuf[0], NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Requal ref2"); - - /* Test parameters to H5Rcopy */ - H5E_BEGIN_TRY - { - ret = H5Rcopy(NULL, &wbuf[0]); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcopy src_ref"); - H5E_BEGIN_TRY - { - ret = H5Rcopy(&rbuf[0], NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rcopy dest_ref"); - - /* Test parameters to H5Ropen_object */ - H5E_BEGIN_TRY - { - dset2 = H5Ropen_object(&rbuf[0], H5I_INVALID_HID, H5I_INVALID_HID); - } - H5E_END_TRY - VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object oapl_id"); - H5E_BEGIN_TRY - { - dset2 = H5Ropen_object(NULL, H5P_DEFAULT, dapl_id); - } - H5E_END_TRY - VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object ref"); - - /* Test parameters to H5Ropen_region */ - H5E_BEGIN_TRY - { - ret_id = H5Ropen_region(NULL, H5I_INVALID_HID, H5I_INVALID_HID); - } - H5E_END_TRY - VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_region ref"); - - /* Test 
parameters to H5Ropen_attr */ - H5E_BEGIN_TRY - { - ret_id = H5Ropen_attr(NULL, H5P_DEFAULT, aapl_id); - } - H5E_END_TRY - VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_attr ref"); - - /* Test parameters to H5Rget_obj_type3 */ - H5E_BEGIN_TRY - { - ret = H5Rget_obj_type3(NULL, H5P_DEFAULT, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rget_obj_type3 ref"); - - /* Test parameters to H5Rget_file_name */ - H5E_BEGIN_TRY - { - name_size = H5Rget_file_name(NULL, NULL, 0); - } - H5E_END_TRY - VERIFY(name_size, (-1), "H5Rget_file_name ref"); - - /* Test parameters to H5Rget_obj_name */ - H5E_BEGIN_TRY - { - name_size = H5Rget_obj_name(NULL, H5P_DEFAULT, NULL, 0); - } - H5E_END_TRY - VERIFY(name_size, (-1), "H5Rget_obj_name ref"); - - /* Test parameters to H5Rget_attr_name */ - H5E_BEGIN_TRY - { - name_size = H5Rget_attr_name(NULL, NULL, 0); - } - H5E_END_TRY - VERIFY(name_size, (-1), "H5Rget_attr_name ref"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close attribute access property list */ - ret = H5Pclose(aapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(tbuf); - free(obuf); -} /* test_reference_params() */ - -/**************************************************************** -** -** test_reference_obj(): Test basic H5R (reference) object reference code. -** Tests references to various kinds of objects -** -****************************************************************/ -static void -test_reference_obj(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hid_t dapl_id; /* Dataset access property list */ - H5R_ref_t *wbuf, /* buffer to write to disk */ - *rbuf; /* buffer read from disk */ - unsigned *ibuf, *obuf; - unsigned i, j; /* Counters */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Object Reference Functions\n")); - - /* Allocate write & read buffers */ - wbuf = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - rbuf = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - ibuf = calloc(sizeof(unsigned), SPACE1_DIM1); - obuf = calloc(sizeof(unsigned), SPACE1_DIM1); - - for (i = 0; i < SPACE1_DIM1; i++) - obuf[i] = i * 3; - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, 
H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a datatype to refer to */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &wbuf[1]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[1], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to group */ - ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &wbuf[2]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); - - /* Create reference to named datatype */ - ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &wbuf[3]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[3], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Open dataset object */ - dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); - CHECK(dset2, H5I_INVALID_HID, 
"H5Ropen_object"); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid1); - VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); - CHECK(ret, FAIL, "H5Dread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(ibuf[i], i * 3, "Data"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */ - group = H5Ropen_object(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Ropen_object"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open datatype object. TAPL isn't supported yet. But it's harmless to pass in */ - tid1 = H5Ropen_object(&rbuf[3], H5P_DEFAULT, H5P_DEFAULT); - CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object"); - - /* Verify correct datatype */ - { - H5T_class_t tclass; - - tclass = H5Tget_class(tid1); - VERIFY(tclass, H5T_COMPOUND, "H5Tget_class"); - - ret = H5Tget_nmembers(tid1); - VERIFY(ret, 3, "H5Tget_nmembers"); - } - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - for (j = 0; j < SPACE1_DIM1; j++) { - ret = H5Rdestroy(&wbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&rbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(ibuf); - free(obuf); -} /* test_reference_obj() */ - -/**************************************************************** -** -** test_reference_vlen_obj(): Test basic H5R (reference) object reference -** within a vlen type. 
-** Tests references to various kinds of objects -** -****************************************************************/ -static void -test_reference_vlen_obj(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t vl_dims[] = {1}; - hid_t dapl_id; /* Dataset access property list */ - H5R_ref_t *wbuf, /* buffer to write to disk */ - *rbuf = NULL; /* buffer read from disk */ - unsigned *ibuf, *obuf; - unsigned i, j; /* Counters */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - hvl_t vl_wbuf = {0, NULL}, vl_rbuf = {0, NULL}; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Object Reference Functions within VLEN type\n")); - - /* Allocate write & read buffers */ - wbuf = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - ibuf = calloc(sizeof(unsigned), SPACE1_DIM1); - obuf = calloc(sizeof(unsigned), SPACE1_DIM1); - - for (i = 0; i < SPACE1_DIM1; i++) - obuf[i] = i * 3; - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_VL_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create a datatype to refer to */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create vlen type */ - tid1 = H5Tvlen_create(H5T_STD_REF); - CHECK(tid1, H5I_INVALID_HID, "H5Tvlen_create"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, vl_dims, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset */ 
- dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &wbuf[1]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[1], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to group */ - ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &wbuf[2]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); - - /* Create reference to named datatype */ - ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &wbuf[3]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[3], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); - - /* Store references into vlen */ - vl_wbuf.len = SPACE1_DIM1; - vl_wbuf.p = wbuf; - - /* Write selection to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &vl_wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_VL_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - tid1 = H5Dget_type(dataset); - CHECK(tid1, H5I_INVALID_HID, "H5Dget_type"); - - /* Read selection from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &vl_rbuf); - CHECK(ret, FAIL, "H5Dread"); - - VERIFY(vl_rbuf.len, SPACE1_DIM1, "H5Dread"); - rbuf = vl_rbuf.p; - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open dataset object */ - dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid1); - VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); - CHECK(ret, FAIL, "H5Dread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(ibuf[i], i * 3, "Data"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */ - group = H5Ropen_object(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Ropen_object"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open datatype object. 
TAPL isn't supported yet. But it's harmless to pass in */ - tid1 = H5Ropen_object(&rbuf[3], H5P_DEFAULT, H5P_DEFAULT); - CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object"); - - /* Verify correct datatype */ - { - H5T_class_t tclass; - - tclass = H5Tget_class(tid1); - VERIFY(tclass, H5T_COMPOUND, "H5Tget_class"); - - ret = H5Tget_nmembers(tid1); - VERIFY(ret, 3, "H5Tget_nmembers"); - } - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - for (j = 0; j < SPACE1_DIM1; j++) { - ret = H5Rdestroy(&wbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&rbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(ibuf); - free(obuf); -} /* test_reference_vlen_obj() */ - -/**************************************************************** -** -** test_reference_cmpnd_obj(): Test basic H5R (reference) object reference -** within a compound type. -** Tests references to various kinds of objects -** -****************************************************************/ -static void -test_reference_cmpnd_obj(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t cmpnd_dims[] = {1}; - hid_t dapl_id; /* Dataset access property list */ - unsigned *ibuf, *obuf; - unsigned i; /* Counter */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - s2_t cmpnd_wbuf, cmpnd_rbuf; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Object Reference Functions within compound type\n")); - - /* Allocate write & read buffers */ - ibuf = calloc(sizeof(unsigned), SPACE1_DIM1); - obuf = calloc(sizeof(unsigned), SPACE1_DIM1); - - for (i = 0; i < SPACE1_DIM1; i++) - obuf[i] = i * 3; - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_CMPND_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, 
FAIL, "H5Sclose"); - - /* Create a datatype to refer to */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create compound type */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s2_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "ref0", HOFFSET(s2_t, ref0), H5T_STD_REF); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "ref1", HOFFSET(s2_t, ref1), H5T_STD_REF); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "ref2", HOFFSET(s2_t, ref2), H5T_STD_REF); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "ref3", HOFFSET(s2_t, ref3), H5T_STD_REF); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "dim_idx", HOFFSET(s2_t, dim_idx), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, cmpnd_dims, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Reset buffer for writing */ - memset(&cmpnd_wbuf, 0, sizeof(cmpnd_wbuf)); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &cmpnd_wbuf.ref0); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&cmpnd_wbuf.ref0, H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &cmpnd_wbuf.ref1); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&cmpnd_wbuf.ref1, H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to group */ - ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &cmpnd_wbuf.ref2); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&cmpnd_wbuf.ref2, H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); - - /* Create reference to named datatype */ - ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &cmpnd_wbuf.ref3); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&cmpnd_wbuf.ref3, H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); - - /* Store dimensions */ - cmpnd_wbuf.dim_idx = SPACE1_DIM1; - - /* Write selection to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &cmpnd_wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close 
datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_CMPND_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - tid1 = H5Dget_type(dataset); - CHECK(tid1, H5I_INVALID_HID, "H5Dget_type"); - - /* Read selection from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &cmpnd_rbuf); - CHECK(ret, FAIL, "H5Dread"); - - VERIFY(cmpnd_rbuf.dim_idx, SPACE1_DIM1, "H5Dread"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Open dataset object */ - dset2 = H5Ropen_object(&cmpnd_rbuf.ref0, H5P_DEFAULT, dapl_id); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid1); - VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); - CHECK(ret, FAIL, "H5Dread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(ibuf[i], i * 3, "Data"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */ - group = H5Ropen_object(&cmpnd_rbuf.ref2, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Ropen_object"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Open datatype object. TAPL isn't supported yet. But it's harmless to pass in */ - tid1 = H5Ropen_object(&cmpnd_rbuf.ref3, H5P_DEFAULT, H5P_DEFAULT); - CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object"); - - /* Verify correct datatype */ - { - H5T_class_t tclass; - - tclass = H5Tget_class(tid1); - VERIFY(tclass, H5T_COMPOUND, "H5Tget_class"); - - ret = H5Tget_nmembers(tid1); - VERIFY(ret, 3, "H5Tget_nmembers"); - } - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - ret = H5Rdestroy(&cmpnd_wbuf.ref0); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&cmpnd_wbuf.ref1); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&cmpnd_wbuf.ref2); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&cmpnd_wbuf.ref3); - CHECK(ret, FAIL, "H5Rdestroy"); - - ret = H5Rdestroy(&cmpnd_rbuf.ref0); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&cmpnd_rbuf.ref1); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&cmpnd_rbuf.ref2); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&cmpnd_rbuf.ref3); - CHECK(ret, FAIL, "H5Rdestroy"); - - /* Free memory buffers */ - free(ibuf); - free(obuf); -} /* test_reference_cmpnd_obj() */ - -/**************************************************************** -** -** test_reference_region(): Test basic H5R (reference) object reference code. -** Tests references to various kinds of objects -** -** Note: The libver_low/libver_high parameters are added to create the file -** with the low and high bounds setting in fapl. 
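/*
 * A minimal sketch (not part of the test suite) of the pattern exercised by
 * test_reference_cmpnd_obj() above: H5R_ref_t members embedded in a compound
 * type, written through H5T_STD_REF and destroyed after use.  The struct,
 * dataset, and path names below are placeholders, not names from the tests.
 */
typedef struct {
    H5R_ref_t obj_ref; /* reference to some object in the file */
    int       tag;     /* ordinary scalar member alongside the reference */
} ref_rec_t;

static herr_t
write_compound_ref_sketch(hid_t fid)
{
    ref_rec_t rec;
    hsize_t   dim = 1;
    hid_t     memtype, space, dset;

    memset(&rec, 0, sizeof(rec));

    /* Compound type: one reference member, one int member */
    memtype = H5Tcreate(H5T_COMPOUND, sizeof(ref_rec_t));
    H5Tinsert(memtype, "obj_ref", HOFFSET(ref_rec_t, obj_ref), H5T_STD_REF);
    H5Tinsert(memtype, "tag", HOFFSET(ref_rec_t, tag), H5T_NATIVE_INT);

    /* Point the reference at an existing object; "/Group1/Dataset1" is assumed to exist */
    if (H5Rcreate_object(fid, "/Group1/Dataset1", H5P_DEFAULT, &rec.obj_ref) < 0)
        return -1;
    rec.tag = 42;

    space = H5Screate_simple(1, &dim, NULL);
    dset  = H5Dcreate2(fid, "RefRecord", memtype, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rec);

    /* References own internal resources and must be destroyed after use */
    H5Rdestroy(&rec.obj_ref);
    H5Dclose(dset);
    H5Sclose(space);
    H5Tclose(memtype);
    return 0;
}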
-** Please see the RFC for "H5Sencode/H5Sdecode Format Change". -** -****************************************************************/ -static void -test_reference_region(H5F_libver_t libver_low, H5F_libver_t libver_high) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t fapl; /* File access property list */ - hid_t dset1, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t sid1, /* Dataspace ID #1 */ - sid2; /* Dataspace ID #2 */ - hid_t dapl_id; /* Dataset access property list */ - hsize_t dims1[] = {SPACE1_DIM1}, dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ - hsize_t *coords; /* Coordinate buffer */ - hsize_t low[SPACE2_RANK]; /* Selection bounds */ - hsize_t high[SPACE2_RANK]; /* Selection bounds */ - H5R_ref_t *wbuf = NULL, /* buffer to write to disk */ - *rbuf = NULL; /* buffer read from disk */ - H5R_ref_t nvrbuf[3] = {{{{0}}}, {{{101}}}, {{{255}}}}; /* buffer with non-valid refs */ - uint8_t *dwbuf = NULL, /* Buffer for writing numeric data to disk */ - *drbuf = NULL; /* Buffer for reading numeric data from disk */ - uint8_t *tu8; /* Temporary pointer to uint8 data */ - H5O_type_t obj_type; /* Type of object */ - int i, j; /* Counters */ - hssize_t hssize_ret; /* hssize_t return value */ - htri_t tri_ret; /* htri_t return value */ - herr_t ret; /* Generic return value */ - hid_t dset_NA; /* Dataset id for undefined reference */ - hid_t space_NA; /* Dataspace id for undefined reference */ - hsize_t dims_NA[1] = {1}; /* Dims array for undefined reference */ - H5R_ref_t rdata_NA[1]; /* Read buffer */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataset Region Reference Functions\n")); - - /* Allocate write & read buffers */ - wbuf = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - rbuf = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - dwbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - drbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - - for (tu8 = dwbuf, i = 0; i < (SPACE2_DIM1 * SPACE2_DIM2); i++) - *tu8++ = (uint8_t)(i * 3); - - /* Create file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); - - /* Set the low/high version bounds in fapl */ - ret = H5Pset_libver_bounds(fapl, libver_low, libver_high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create file with the fapl */ - fid1 = H5Fcreate(FILE_REF_REG, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a dataset */ - dset2 = H5Dcreate2(fid1, "Dataset2", H5T_STD_U8LE, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset2, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dset2, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dwbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create dataspace for the reference dataset */ - sid1 = 
H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset */ - H5E_BEGIN_TRY - { - dset1 = H5Dcreate2(fid1, "Dataset1", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - - if (dset1 < 0) { - VERIFY(libver_high <= H5F_LIBVER_V110, true, "H5Dcreate2"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - } - else { - - CHECK(dset1, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create references */ - - /* Select 6x6 hyperslab for first reference */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 6; - block[1] = 6; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, 36, "H5Sget_select_npoints"); - - /* Store first dataset region */ - ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_region"); - ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Select sequence of ten points for second reference */ - coord1[0][0] = 6; - coord1[0][1] = 9; - coord1[1][0] = 2; - coord1[1][1] = 2; - coord1[2][0] = 8; - coord1[2][1] = 4; - coord1[3][0] = 1; - coord1[3][1] = 6; - coord1[4][0] = 2; - coord1[4][1] = 8; - coord1[5][0] = 3; - coord1[5][1] = 2; - coord1[6][0] = 0; - coord1[6][1] = 4; - coord1[7][0] = 9; - coord1[7][1] = 0; - coord1[8][0] = 7; - coord1[8][1] = 1; - coord1[9][0] = 3; - coord1[9][1] = 3; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, SPACE2_DIM2, "H5Sget_select_npoints"); - - /* Store second dataset region */ - ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[1]); - CHECK(ret, FAIL, "H5Rcreate_region"); - - /* Select unlimited hyperslab for third reference */ - start[0] = 1; - start[1] = 8; - stride[0] = 4; - stride[1] = 1; - count[0] = H5S_UNLIMITED; - count[1] = 1; - block[0] = 2; - block[1] = 2; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - hssize_ret = H5Sget_select_npoints(sid2); - VERIFY(hssize_ret, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); - - /* Store third dataset region */ - ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[2]); - CHECK(ret, FAIL, "H5Rcreate_region"); - - ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Store fourth dataset region */ - ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[3]); - CHECK(ret, FAIL, "H5Rcreate_region"); - - /* Write selection to disk */ - ret = H5Dwrite(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - - /* - * Store a dataset region reference which will not get written to disk - */ - - /* Create the dataspace of the region references */ - space_NA = H5Screate_simple(1, dims_NA, NULL); - CHECK(space_NA, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create the dataset and write the region references to it */ - dset_NA = 
H5Dcreate2(fid1, "DS_NA", H5T_STD_REF, space_NA, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_NA, H5I_INVALID_HID, "H5Dcreate"); - - /* Close and release resources for undefined region reference tests */ - ret = H5Dclose(dset_NA); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(space_NA); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close uint8 dataset dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_REG, H5F_ACC_RDWR, fapl); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* - * Start the test of an undefined reference - */ - - /* Open the dataset of the undefined references */ - dset_NA = H5Dopen2(fid1, "DS_NA", H5P_DEFAULT); - CHECK(dset_NA, H5I_INVALID_HID, "H5Dopen2"); - - /* Read the data */ - ret = H5Dread(dset_NA, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_NA); - CHECK(ret, FAIL, "H5Dread"); - - /* - * Dereference an undefined reference (should fail) - */ - H5E_BEGIN_TRY - { - dset2 = H5Ropen_object(&rdata_NA[0], H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - /* Close and release resources. */ - ret = H5Dclose(dset_NA); - CHECK(ret, FAIL, "H5Dclose"); - - /* This close should fail since H5Ropen_object never created - * the id of the referenced object. */ - H5E_BEGIN_TRY - { - ret = H5Dclose(dset2); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dclose"); - - /* - * End the test of an undefined reference - */ - - /* Open the dataset */ - dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT); - CHECK(dset1, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Try to open objects */ - dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - /* Check what H5Rget_obj_type3 function returns */ - ret = H5Rget_obj_type3(&rbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid1); - VERIFY(ret, (SPACE2_DIM1 * SPACE2_DIM2), "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Dread(dset2, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, drbuf); - CHECK(ret, FAIL, "H5Dread"); - - for (tu8 = (uint8_t *)drbuf, i = 0; i < (SPACE2_DIM1 * SPACE2_DIM2); i++, tu8++) - VERIFY(*tu8, (uint8_t)(i * 3), "Data"); - - /* Get the hyperslab selection */ - sid2 = H5Ropen_region(&rbuf[0], H5P_DEFAULT, H5P_DEFAULT); - CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region"); - - /* Verify correct hyperslab selected */ - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, 36, "H5Sget_select_npoints"); - ret = (int)H5Sget_select_hyper_nblocks(sid2); - VERIFY(ret, 1, "H5Sget_select_hyper_nblocks"); - - /* allocate space for the hyperslab blocks */ - coords = (hsize_t *)malloc((size_t)ret * SPACE2_RANK * sizeof(hsize_t) * 2); - - ret = H5Sget_select_hyper_blocklist(sid2, (hsize_t)0, (hsize_t)ret, coords); - CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist"); - VERIFY(coords[0], 2, "Hyperslab Coordinates"); - VERIFY(coords[1], 2, 
"Hyperslab Coordinates"); - VERIFY(coords[2], 7, "Hyperslab Coordinates"); - VERIFY(coords[3], 7, "Hyperslab Coordinates"); - free(coords); - ret = H5Sget_select_bounds(sid2, low, high); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low[0], 2, "Selection Bounds"); - VERIFY(low[1], 2, "Selection Bounds"); - VERIFY(high[0], 7, "Selection Bounds"); - VERIFY(high[1], 7, "Selection Bounds"); - - /* Close region space */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Get the element selection */ - sid2 = H5Ropen_region(&rbuf[1], H5P_DEFAULT, H5P_DEFAULT); - CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region"); - - /* Verify correct elements selected */ - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, SPACE2_DIM2, "H5Sget_select_npoints"); - ret = (int)H5Sget_select_elem_npoints(sid2); - VERIFY(ret, SPACE2_DIM2, "H5Sget_select_elem_npoints"); - - /* allocate space for the element points */ - coords = (hsize_t *)malloc((size_t)ret * SPACE2_RANK * sizeof(hsize_t)); - - ret = H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)ret, coords); - CHECK(ret, FAIL, "H5Sget_select_elem_pointlist"); - VERIFY(coords[0], coord1[0][0], "Element Coordinates"); - VERIFY(coords[1], coord1[0][1], "Element Coordinates"); - VERIFY(coords[2], coord1[1][0], "Element Coordinates"); - VERIFY(coords[3], coord1[1][1], "Element Coordinates"); - VERIFY(coords[4], coord1[2][0], "Element Coordinates"); - VERIFY(coords[5], coord1[2][1], "Element Coordinates"); - VERIFY(coords[6], coord1[3][0], "Element Coordinates"); - VERIFY(coords[7], coord1[3][1], "Element Coordinates"); - VERIFY(coords[8], coord1[4][0], "Element Coordinates"); - VERIFY(coords[9], coord1[4][1], "Element Coordinates"); - VERIFY(coords[10], coord1[5][0], "Element Coordinates"); - VERIFY(coords[11], coord1[5][1], "Element Coordinates"); - VERIFY(coords[12], coord1[6][0], "Element Coordinates"); - VERIFY(coords[13], coord1[6][1], "Element Coordinates"); - VERIFY(coords[14], coord1[7][0], "Element Coordinates"); - VERIFY(coords[15], coord1[7][1], "Element Coordinates"); - VERIFY(coords[16], coord1[8][0], "Element Coordinates"); - VERIFY(coords[17], coord1[8][1], "Element Coordinates"); - VERIFY(coords[18], coord1[9][0], "Element Coordinates"); - VERIFY(coords[19], coord1[9][1], "Element Coordinates"); - free(coords); - ret = H5Sget_select_bounds(sid2, low, high); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low[0], 0, "Selection Bounds"); - VERIFY(low[1], 0, "Selection Bounds"); - VERIFY(high[0], 9, "Selection Bounds"); - VERIFY(high[1], 9, "Selection Bounds"); - - /* Close region space */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Get the unlimited selection */ - sid2 = H5Ropen_region(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT); - CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region"); - - /* Verify correct hyperslab selected */ - hssize_ret = H5Sget_select_npoints(sid2); - VERIFY(hssize_ret, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); - tri_ret = H5Sis_regular_hyperslab(sid2); - CHECK(tri_ret, FAIL, "H5Sis_regular_hyperslab"); - VERIFY(tri_ret, true, "H5Sis_regular_hyperslab Result"); - ret = H5Sget_regular_hyperslab(sid2, start, stride, count, block); - CHECK(ret, FAIL, "H5Sget_regular_hyperslab"); - VERIFY(start[0], (hsize_t)1, "Hyperslab Coordinates"); - VERIFY(start[1], (hsize_t)8, "Hyperslab Coordinates"); - VERIFY(stride[0], (hsize_t)4, "Hyperslab Coordinates"); - VERIFY(stride[1], (hsize_t)1, "Hyperslab Coordinates"); - VERIFY(count[0], H5S_UNLIMITED, "Hyperslab Coordinates"); - VERIFY(count[1], (hsize_t)1, 
"Hyperslab Coordinates"); - VERIFY(block[0], (hsize_t)2, "Hyperslab Coordinates"); - VERIFY(block[1], (hsize_t)2, "Hyperslab Coordinates"); - - /* Close region space */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close first space */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Attempting to retrieve type of object using non-valid refs */ - for (j = 0; j < 3; j++) { - H5E_BEGIN_TRY - { - ret = H5Rget_obj_type3(&nvrbuf[j], H5P_DEFAULT, &obj_type); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Rget_obj_type3"); - } /* end for */ - - /* Close Dataset */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - for (j = 0; j < SPACE1_DIM1; j++) { - ret = H5Rdestroy(&wbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&rbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - } - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(dwbuf); - free(drbuf); - -} /* test_reference_region() */ - -/**************************************************************** -** -** test_reference_region_1D(): Test H5R (reference) object reference code. -** Tests 1-D references to various kinds of objects -** -** Note: The libver_low/libver_high parameters are added to create the file -** with the low and high bounds setting in fapl. -** Please see the RFC for "H5Sencode/H5Sdecode Format Change". -** -****************************************************************/ -static void -test_reference_region_1D(H5F_libver_t libver_low, H5F_libver_t libver_high) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t fapl; /* File access property list */ - hid_t dset1, /* Dataset ID */ - dset3; /* Dereferenced dataset ID */ - hid_t sid1, /* Dataspace ID #1 */ - sid3; /* Dataspace ID #3 */ - hid_t dapl_id; /* Dataset access property list */ - hsize_t dims1[] = {2}, /* Must be 2 */ - dims3[] = {SPACE3_DIM1}; - hsize_t start[SPACE3_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE3_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE3_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE3_RANK]; /* Block size of hyperslab */ - hsize_t coord1[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ - hsize_t *coords; /* Coordinate buffer */ - hsize_t low[SPACE3_RANK]; /* Selection bounds */ - hsize_t high[SPACE3_RANK]; /* Selection bounds */ - H5R_ref_t *wbuf = NULL, /* buffer to write to disk */ - *rbuf = NULL; /* buffer read from disk */ - uint8_t *dwbuf = NULL, /* Buffer for writing numeric data to disk */ - *drbuf = NULL; /* Buffer for reading numeric data from disk */ - uint8_t *tu8; /* Temporary pointer to uint8 data */ - H5O_type_t obj_type; /* Object type */ - int i; /* Counter */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 1-D Dataset Region Reference Functions\n")); - - /* Allocate write & read buffers */ - wbuf = calloc(sizeof(H5R_ref_t), (size_t)SPACE1_DIM1); - rbuf = calloc(sizeof(H5R_ref_t), (size_t)SPACE1_DIM1); - dwbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)SPACE3_DIM1); - drbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)SPACE3_DIM1); - - for (tu8 = dwbuf, i = 0; i < SPACE3_DIM1; i++) - *tu8++ = (uint8_t)(i * 3); - - /* Create the file access property list */ - 
fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); - - /* Set the low/high version bounds in fapl */ - ret = H5Pset_libver_bounds(fapl, libver_low, libver_high); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); - - /* Create file with the fapl */ - fid1 = H5Fcreate(FILE_REF_REG_1D, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid3 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid3, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a dataset */ - dset3 = H5Dcreate2(fid1, "Dataset2", H5T_STD_U8LE, sid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset3, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dset3, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dwbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create dataspace for the reference dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset */ - H5E_BEGIN_TRY - { - dset1 = H5Dcreate2(fid1, "Dataset1", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - } - H5E_END_TRY - - if (dset1 < 0) { - - VERIFY(libver_high <= H5F_LIBVER_V110, true, "H5Dcreate2"); - - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - } - else { - - CHECK(ret, FAIL, "H5Dcreate2"); - - /* Create references */ - - /* Select 15 2x1 hyperslabs for first reference */ - start[0] = 2; - stride[0] = 5; - count[0] = 15; - block[0] = 2; - ret = H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - ret = (int)H5Sget_select_npoints(sid3); - VERIFY(ret, (block[0] * count[0]), "H5Sget_select_npoints"); - - /* Store first dataset region */ - ret = H5Rcreate_region(fid1, "/Dataset2", sid3, H5P_DEFAULT, &wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_region"); - ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Select sequence of ten points for second reference */ - coord1[0][0] = 16; - coord1[1][0] = 22; - coord1[2][0] = 38; - coord1[3][0] = 41; - coord1[4][0] = 52; - coord1[5][0] = 63; - coord1[6][0] = 70; - coord1[7][0] = 89; - coord1[8][0] = 97; - coord1[9][0] = 03; - ret = H5Sselect_elements(sid3, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - ret = (int)H5Sget_select_npoints(sid3); - VERIFY(ret, POINT1_NPOINTS, "H5Sget_select_npoints"); - - /* Store second dataset region */ - ret = H5Rcreate_region(fid1, "/Dataset2", sid3, H5P_DEFAULT, &wbuf[1]); - CHECK(ret, FAIL, "H5Rcreate_region"); - - /* Write selection to disk */ - ret = H5Dwrite(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close uint8 dataset dataspace */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); 
- CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_REG_1D, H5F_ACC_RDWR, fapl); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT); - CHECK(dset1, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Try to open objects */ - dset3 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); - CHECK(dset3, H5I_INVALID_HID, "H5Ropen_object"); - - /* Check what H5Rget_obj_type3 function returns */ - ret = H5Rget_obj_type3(&rbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset3); - CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid1); - VERIFY(ret, SPACE3_DIM1, "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Dread(dset3, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, drbuf); - CHECK(ret, FAIL, "H5Dread"); - - for (tu8 = (uint8_t *)drbuf, i = 0; i < SPACE3_DIM1; i++, tu8++) - VERIFY(*tu8, (uint8_t)(i * 3), "Data"); - - /* Get the hyperslab selection */ - sid3 = H5Ropen_region(&rbuf[0], H5P_DEFAULT, H5P_DEFAULT); - CHECK(sid3, H5I_INVALID_HID, "H5Ropen_region"); - - /* Verify correct hyperslab selected */ - ret = (int)H5Sget_select_npoints(sid3); - VERIFY(ret, 30, "H5Sget_select_npoints"); - ret = (int)H5Sget_select_hyper_nblocks(sid3); - VERIFY(ret, 15, "H5Sget_select_hyper_nblocks"); - - /* allocate space for the hyperslab blocks */ - coords = (hsize_t *)malloc((size_t)ret * SPACE3_RANK * sizeof(hsize_t) * 2); - - ret = H5Sget_select_hyper_blocklist(sid3, (hsize_t)0, (hsize_t)ret, coords); - CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist"); - VERIFY(coords[0], 2, "Hyperslab Coordinates"); - VERIFY(coords[1], 3, "Hyperslab Coordinates"); - VERIFY(coords[2], 7, "Hyperslab Coordinates"); - VERIFY(coords[3], 8, "Hyperslab Coordinates"); - VERIFY(coords[4], 12, "Hyperslab Coordinates"); - VERIFY(coords[5], 13, "Hyperslab Coordinates"); - VERIFY(coords[6], 17, "Hyperslab Coordinates"); - VERIFY(coords[7], 18, "Hyperslab Coordinates"); - VERIFY(coords[8], 22, "Hyperslab Coordinates"); - VERIFY(coords[9], 23, "Hyperslab Coordinates"); - VERIFY(coords[10], 27, "Hyperslab Coordinates"); - VERIFY(coords[11], 28, "Hyperslab Coordinates"); - VERIFY(coords[12], 32, "Hyperslab Coordinates"); - VERIFY(coords[13], 33, "Hyperslab Coordinates"); - VERIFY(coords[14], 37, "Hyperslab Coordinates"); - VERIFY(coords[15], 38, "Hyperslab Coordinates"); - VERIFY(coords[16], 42, "Hyperslab Coordinates"); - VERIFY(coords[17], 43, "Hyperslab Coordinates"); - VERIFY(coords[18], 47, "Hyperslab Coordinates"); - VERIFY(coords[19], 48, "Hyperslab Coordinates"); - VERIFY(coords[20], 52, "Hyperslab Coordinates"); - VERIFY(coords[21], 53, "Hyperslab Coordinates"); - VERIFY(coords[22], 57, "Hyperslab Coordinates"); - VERIFY(coords[23], 58, "Hyperslab Coordinates"); - VERIFY(coords[24], 62, "Hyperslab Coordinates"); - VERIFY(coords[25], 63, "Hyperslab Coordinates"); - VERIFY(coords[26], 67, "Hyperslab Coordinates"); - VERIFY(coords[27], 68, "Hyperslab Coordinates"); - VERIFY(coords[28], 72, "Hyperslab Coordinates"); - VERIFY(coords[29], 73, "Hyperslab Coordinates"); - free(coords); - ret = H5Sget_select_bounds(sid3, low, high); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low[0], 2, "Selection 
Bounds"); - VERIFY(high[0], 73, "Selection Bounds"); - - /* Close region space */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Get the element selection */ - sid3 = H5Ropen_region(&rbuf[1], H5P_DEFAULT, H5P_DEFAULT); - CHECK(sid3, H5I_INVALID_HID, "H5Ropen_region"); - - /* Verify correct elements selected */ - ret = (int)H5Sget_select_npoints(sid3); - VERIFY(ret, 10, "H5Sget_select_npoints"); - ret = (int)H5Sget_select_elem_npoints(sid3); - VERIFY(ret, 10, "H5Sget_select_elem_npoints"); - - /* allocate space for the element points */ - coords = (hsize_t *)malloc((size_t)ret * SPACE3_RANK * sizeof(hsize_t)); - - ret = H5Sget_select_elem_pointlist(sid3, (hsize_t)0, (hsize_t)ret, coords); - CHECK(ret, FAIL, "H5Sget_select_elem_pointlist"); - VERIFY(coords[0], coord1[0][0], "Element Coordinates"); - VERIFY(coords[1], coord1[1][0], "Element Coordinates"); - VERIFY(coords[2], coord1[2][0], "Element Coordinates"); - VERIFY(coords[3], coord1[3][0], "Element Coordinates"); - VERIFY(coords[4], coord1[4][0], "Element Coordinates"); - VERIFY(coords[5], coord1[5][0], "Element Coordinates"); - VERIFY(coords[6], coord1[6][0], "Element Coordinates"); - VERIFY(coords[7], coord1[7][0], "Element Coordinates"); - VERIFY(coords[8], coord1[8][0], "Element Coordinates"); - VERIFY(coords[9], coord1[9][0], "Element Coordinates"); - free(coords); - ret = H5Sget_select_bounds(sid3, low, high); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low[0], 3, "Selection Bounds"); - VERIFY(high[0], 97, "Selection Bounds"); - - /* Close region space */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close first space */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset3); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close Dataset */ - ret = H5Dclose(dset1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - for (i = 0; i < 2; i++) { - ret = H5Rdestroy(&wbuf[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&rbuf[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - } - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(dwbuf); - free(drbuf); - -} /* test_reference_region_1D() */ - -/**************************************************************** -** -** test_reference_obj_deleted(): Test H5R (reference) object reference code. 
-** Tests for correct failures for deleted and non-existent objects -** -****************************************************************/ -static void -test_reference_obj_deleted(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t sid1; /* Dataspace ID */ - H5R_ref_t oref; /* Object Reference to test */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - - MESSAGE(5, ("Testing References to Deleted Objects\n")); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_REF_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) && - (vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { - /* Create file */ - fid1 = H5Fcreate(FILE_REF_OBJ_DEL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create scalar dataspace for datasets */ - sid1 = H5Screate_simple(0, NULL, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset to reference (deleted later) */ - dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset2", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Dataset1", H5P_DEFAULT, &oref); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&oref, H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Delete referenced dataset */ - ret = H5Ldelete(fid1, "/Dataset1", H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy reference */ - ret = H5Rdestroy(&oref); - CHECK(ret, FAIL, "H5Rdestroy"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_OBJ_DEL, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset2", H5P_DEFAULT); - CHECK(ret, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref); - CHECK(ret, FAIL, "H5Dread"); - - /* Open deleted dataset object */ - dset2 = H5Ropen_object(&oref, H5P_DEFAULT, H5P_DEFAULT); - VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy reference */ - ret = H5Rdestroy(&oref); - CHECK(ret, FAIL, "H5Rdestroy"); - } -} /* test_reference_obj_deleted() */ - -/**************************************************************** -** -** test_deref_iter_op(): Iterator callback for test_reference_group_iterate() -** test. 
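/*
 * Sketch of the failure mode checked by test_reference_obj_deleted() above:
 * once the target of a reference is unlinked, H5Ropen_object() on that
 * reference is expected to fail.  The path is a placeholder, and the test
 * above additionally round-trips the reference through the file before
 * checking.
 */
static herr_t
deleted_target_sketch(hid_t fid)
{
    H5R_ref_t ref;
    hid_t     obj;

    /* Reference a dataset, then remove the only link to it */
    H5Rcreate_object(fid, "/Dataset1", H5P_DEFAULT, &ref);
    H5Ldelete(fid, "/Dataset1", H5P_DEFAULT);

    /* Dereferencing is now expected to return H5I_INVALID_HID */
    H5E_BEGIN_TRY
    {
        obj = H5Ropen_object(&ref, H5P_DEFAULT, H5P_DEFAULT);
    }
    H5E_END_TRY

    H5Rdestroy(&ref);
    return (obj == H5I_INVALID_HID) ? 0 : -1;
}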
-** -****************************************************************/ -static herr_t -test_deref_iter_op(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR_UNUSED *info, - void *op_data) -{ - int *count = (int *)op_data; /* Pointer to name counter */ - herr_t ret_value; - - /* Simple check for correct names */ - if (*count == 0) { - if (strcmp(name, DSETNAME2) == 0) - ret_value = 0; - else - ret_value = -1; - } /* end if */ - else if (*count == 1) { - if (strcmp(name, GROUPNAME2) == 0) - ret_value = 0; - else - ret_value = -1; - } /* end if */ - else if (*count == 2) { - if (strcmp(name, GROUPNAME3) == 0) - ret_value = 0; - else - ret_value = -1; - } /* end if */ - else - ret_value = -1; - - (*count)++; - - return (ret_value); -} /* end test_deref_iter_op() */ - -/**************************************************************** -** -** test_reference_group(): Test H5R (reference) object reference code. -** Tests for correct behavior of various routines on dereferenced group -** -****************************************************************/ -static void -test_reference_group(void) -{ - hid_t fid = -1; /* File ID */ - hid_t gid = -1, gid2 = -1; /* Group IDs */ - hid_t did; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - H5R_ref_t wref; /* Reference to write */ - H5R_ref_t rref; /* Reference to read */ - H5G_info_t ginfo; /* Group info struct */ - char objname[NAME_SIZE]; /* Buffer to store name */ - H5O_info2_t oinfo; /* Object info struct */ - int count = 0; /* Count within iterated group */ - ssize_t size; /* Name length */ - herr_t ret; - - /* Create file with a group and a dataset containing an object reference to the group */ - fid = H5Fcreate(FILE_REF_GRP, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace to use for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, H5I_INVALID_HID, "H5Screate"); - - /* Create group to refer to */ - gid = H5Gcreate2(fid, GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create nested groups */ - gid2 = H5Gcreate2(gid, GROUPNAME2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, H5I_INVALID_HID, "H5Gcreate2"); - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - gid2 = H5Gcreate2(gid, GROUPNAME3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid2, H5I_INVALID_HID, "H5Gcreate2"); - ret = H5Gclose(gid2); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create bottom dataset */ - did = H5Dcreate2(gid, DSETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create dataset */ - did = H5Dcreate2(fid, DSETNAME, H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create reference to group */ - ret = H5Rcreate_object(fid, GROUPNAME, H5P_DEFAULT, &wref); - CHECK(ret, FAIL, "H5Rcreate_object"); - - /* Write reference to disk */ - ret = H5Dwrite(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wref); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close objects */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy reference */ - ret = H5Rdestroy(&wref); - CHECK(ret, FAIL, "H5Rdestroy"); - - /* Re-open file */ - fid = H5Fopen(FILE_REF_GRP, H5F_ACC_RDWR, H5P_DEFAULT); 
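/*
 * Sketch of the pattern test_reference_group() walks through here: a stored
 * group reference is read back, dereferenced, and the resulting id is usable
 * with the ordinary group APIs such as H5Literate2().  The callback name and
 * dataset path are placeholders.
 */
static herr_t
count_links_cb(hid_t H5_ATTR_UNUSED loc, const char H5_ATTR_UNUSED *name,
               const H5L_info2_t H5_ATTR_UNUSED *info, void *op_data)
{
    (*(int *)op_data)++; /* just count the links */
    return 0;
}

static int
iterate_referenced_group_sketch(hid_t fid)
{
    H5R_ref_t rref;
    hid_t     did, gid;
    int       nlinks = 0;

    /* Read a single group reference previously written to "/RefToGroup" */
    did = H5Dopen2(fid, "/RefToGroup", H5P_DEFAULT);
    H5Dread(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rref);

    /* The dereferenced id behaves like any other group id */
    gid = H5Ropen_object(&rref, H5P_DEFAULT, H5P_DEFAULT);
    H5Literate2(gid, H5_INDEX_NAME, H5_ITER_INC, NULL, count_links_cb, &nlinks);

    H5Gclose(gid);
    H5Dclose(did);
    H5Rdestroy(&rref);
    return nlinks;
}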
- CHECK(fid, H5I_INVALID_HID, "H5Fopen"); - - /* Re-open dataset */ - did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dopen2"); - - /* Read in the reference */ - ret = H5Dread(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rref); - CHECK(ret, FAIL, "H5Dread"); - - /* Dereference to get the group */ - gid = H5Ropen_object(&rref, H5P_DEFAULT, H5P_DEFAULT); - CHECK(gid, H5I_INVALID_HID, "H5Ropen_object"); - - /* Iterate through objects in dereferenced group */ - ret = H5Literate2(gid, H5_INDEX_NAME, H5_ITER_INC, NULL, test_deref_iter_op, &count); - CHECK(ret, FAIL, "H5Literate"); - - /* Various queries on the group opened */ - ret = H5Gget_info(gid, &ginfo); - CHECK(ret, FAIL, "H5Gget_info"); - VERIFY(ginfo.nlinks, 3, "H5Gget_info"); - - size = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, objname, (size_t)NAME_SIZE, - H5P_DEFAULT); - CHECK(size, (-1), "H5Lget_name_by_idx"); - VERIFY_STR(objname, DSETNAME2, "H5Lget_name_by_idx"); - - ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &oinfo, H5O_INFO_BASIC, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_idx3"); - VERIFY(oinfo.type, H5O_TYPE_DATASET, "H5Oget_info_by_idx3"); - - /* Unlink one of the objects in the dereferenced group */ - ret = H5Ldelete(gid, GROUPNAME2, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Delete dataset object in dereferenced group (with other dataset still open) */ - ret = H5Ldelete(gid, DSETNAME2, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - - /* Close objects */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Gclose(gid); - CHECK(ret, FAIL, "H5Gclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy reference */ - ret = H5Rdestroy(&rref); - CHECK(ret, FAIL, "H5Rdestroy"); -} /* test_reference_group() */ - -/**************************************************************** -** -** test_reference_attr(): Test basic H5R (reference) attribute reference code. 
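/*
 * Condensed sketch of the attribute-reference flow that test_reference_attr()
 * exercises below: H5Rcreate_attr() names both the object and the attribute,
 * and H5Ropen_attr() hands back an attribute id ready for H5Aread().  The
 * object and attribute names are placeholders, and the caller's buffer is
 * assumed to be sized to the attribute.
 */
static herr_t
attr_ref_sketch(hid_t fid, unsigned *out)
{
    H5R_ref_t aref;
    hid_t     attr;
    herr_t    status;

    /* Reference the attribute "Attr1" attached to "/Group1/Dataset1" */
    if (H5Rcreate_attr(fid, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &aref) < 0)
        return -1;

    /* Reopen the attribute through the reference and read its data */
    attr   = H5Ropen_attr(&aref, H5P_DEFAULT, H5P_DEFAULT);
    status = H5Aread(attr, H5T_NATIVE_UINT, out);

    H5Aclose(attr);
    H5Rdestroy(&aref);
    return status;
}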
-** Tests references to attributes on various kinds of objects -** -****************************************************************/ -static void -test_reference_attr(void) -{ - hid_t fid; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t attr; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[] = {SPACE1_DIM1}; - hid_t dapl_id; /* Dataset access property list */ - H5R_ref_t ref_wbuf[SPACE1_DIM1], /* Buffer to write to disk */ - ref_rbuf[SPACE1_DIM1]; /* Buffer read from disk */ - unsigned wbuf[SPACE1_DIM1], rbuf[SPACE1_DIM1]; - unsigned i; /* Local index variables */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Attribute Reference Functions\n")); - - /* Create file */ - fid = H5Fcreate(FILE_REF_ATTR, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(SPACE1_RANK, dims, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(group, "Attr2", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - for (i = 0; i < SPACE1_DIM1; i++) - wbuf[i] = (i * 3) + 1; - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(dataset, "Attr1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - for (i = 0; i < SPACE1_DIM1; i++) - wbuf[i] = i * 3; - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a datatype to refer to */ - tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create an attribute for the datatype */ - attr = H5Acreate2(tid, "Attr3", H5T_NATIVE_UINT, 
sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - for (i = 0; i < SPACE1_DIM1; i++) - wbuf[i] = (i * 3) + 2; - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid, "Dataset3", H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create reference to dataset1 attribute */ - ret = H5Rcreate_attr(fid, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &ref_wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to dataset2 attribute */ - ret = H5Rcreate_attr(fid, "/Group1/Dataset2", "Attr1", H5P_DEFAULT, &ref_wbuf[1]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[1], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to group attribute */ - ret = H5Rcreate_attr(fid, "/Group1", "Attr2", H5P_DEFAULT, &ref_wbuf[2]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[2], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); - - /* Create reference to named datatype attribute */ - ret = H5Rcreate_attr(fid, "/Group1/Datatype1", "Attr3", H5P_DEFAULT, &ref_wbuf[3]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[3], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid = H5Fopen(FILE_REF_ATTR, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid, "/Dataset3", H5P_DEFAULT); - CHECK(ret, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Open attribute on dataset object */ - attr = H5Ropen_attr(&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); - - /* Check information in referenced dataset */ - sid = H5Aget_space(attr); - CHECK(sid, H5I_INVALID_HID, "H5Aget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid); - VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); - CHECK(ret, FAIL, "H5Aread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(rbuf[i], i * 3, "Data"); - - /* Close dereferenced Dataset */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open attribute on group object */ - attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT); - 
CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); - - /* Read from disk */ - ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); - CHECK(ret, FAIL, "H5Aread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(rbuf[i], (i * 3) + 1, "Data"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open attribute on named datatype object */ - attr = H5Ropen_attr(&ref_rbuf[3], H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); - - /* Read from disk */ - ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); - CHECK(ret, FAIL, "H5Aread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(rbuf[i], (i * 3) + 2, "Data"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - for (i = 0; i < SPACE1_DIM1; i++) { - ret = H5Rdestroy(&ref_wbuf[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&ref_rbuf[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - } -} /* test_reference_attr() */ - -/**************************************************************** -** -** test_reference_external(): -** Tests external references on various kinds of objects -** -****************************************************************/ -static void -test_reference_external(void) -{ - hid_t fid1, fid2; /* HDF5 File ID */ - hid_t dataset; /* Dataset ID */ - hid_t group; /* Group ID */ - hid_t attr; /* Attribute ID */ - hid_t sid; /* Dataspace ID */ - hid_t tid; /* Datatype ID */ - hsize_t dims[] = {SPACE1_DIM1}; - hid_t dapl_id; /* Dataset access property list */ - H5R_ref_t ref_wbuf[SPACE1_DIM1], /* Buffer to write to disk */ - ref_rbuf[SPACE1_DIM1]; /* Buffer read from disk */ - unsigned wbuf[SPACE1_DIM1], rbuf[SPACE1_DIM1]; - unsigned i; /* Local index variables */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing External References Functions\n")); - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_EXT1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = H5Screate_simple(SPACE1_RANK, dims, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(group, "Attr2", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - for (i = 0; i < SPACE1_DIM1; i++) - wbuf[i] = (i * 3) + 1; - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create an attribute for the dataset */ - attr = H5Acreate2(dataset, "Attr1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, 
H5I_INVALID_HID, "H5Acreate2"); - - for (i = 0; i < SPACE1_DIM1; i++) - wbuf[i] = i * 3; - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a datatype to refer to */ - tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Create an attribute for the datatype */ - attr = H5Acreate2(tid, "Attr3", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Acreate2"); - - for (i = 0; i < SPACE1_DIM1; i++) - wbuf[i] = (i * 3) + 2; - - /* Write attribute to disk */ - ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf); - CHECK(ret, FAIL, "H5Awrite"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close datatype */ - ret = H5Tclose(tid); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create reference to dataset1 attribute */ - ret = H5Rcreate_attr(fid1, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &ref_wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to dataset2 attribute */ - ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr1", H5P_DEFAULT, &ref_wbuf[1]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[1], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Create reference to group attribute */ - ret = H5Rcreate_attr(fid1, "/Group1", "Attr2", H5P_DEFAULT, &ref_wbuf[2]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[2], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); - - /* Create reference to named datatype attribute */ - ret = H5Rcreate_attr(fid1, "/Group1/Datatype1", "Attr3", H5P_DEFAULT, &ref_wbuf[3]); - CHECK(ret, FAIL, "H5Rcreate_attr"); - ret = H5Rget_obj_type3(&ref_wbuf[3], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Create file */ - fid2 = H5Fcreate(FILE_REF_EXT2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid2, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid = 
H5Screate_simple(SPACE1_RANK, dims, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid2, "Dataset3", H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid2 = H5Fopen(FILE_REF_EXT2, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid2, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid2, "/Dataset3", H5P_DEFAULT); - CHECK(ret, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Open attribute on dataset object */ - attr = H5Ropen_attr(&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); - - /* Check information in referenced dataset */ - sid = H5Aget_space(attr); - CHECK(sid, H5I_INVALID_HID, "H5Aget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid); - VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); - CHECK(ret, FAIL, "H5Aread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(rbuf[i], i * 3, "Data"); - - /* Close dereferenced Dataset */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open attribute on group object */ - attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); - - /* Read from disk */ - ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); - CHECK(ret, FAIL, "H5Aread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(rbuf[i], (i * 3) + 1, "Data"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Open attribute on named datatype object */ - attr = H5Ropen_attr(&ref_rbuf[3], H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); - - /* Read from disk */ - ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf); - CHECK(ret, FAIL, "H5Aread"); - - for (i = 0; i < SPACE1_DIM1; i++) - VERIFY(rbuf[i], (i * 3) + 2, "Data"); - - /* Close attribute */ - ret = H5Aclose(attr); - CHECK(ret, FAIL, "H5Aclose"); - - /* Close dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid2); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - for (i = 0; i < SPACE1_DIM1; i++) { - ret = H5Rdestroy(&ref_wbuf[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&ref_rbuf[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - } -} /* test_reference_external() */ - -/**************************************************************** -** -** test_reference_compat_conv(): Test basic H5R (reference) object reference code. -** Tests deprecated API routines and type conversion. 
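/*
 * Sketch of the cross-file usage shown in test_reference_external() above:
 * references are created against objects in one file but stored in a dataset
 * of type H5T_STD_REF in a second file, and dereferencing them later resolves
 * back to the original file.  The file names are placeholders.
 */
static herr_t
external_ref_sketch(void)
{
    H5R_ref_t ref;
    hsize_t   dim = 1;
    hid_t     src, dst, space, dset;

    /* Reference an object that lives in source.h5 */
    src = H5Fopen("source.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    if (H5Rcreate_object(src, "/Group1/Dataset1", H5P_DEFAULT, &ref) < 0)
        return -1;
    H5Fclose(src);

    /* Persist the reference in a different file */
    dst   = H5Fcreate("refs.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space = H5Screate_simple(1, &dim, NULL);
    dset  = H5Dcreate2(dst, "ExternalRef", H5T_STD_REF, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &ref);

    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(dst);
    H5Rdestroy(&ref);
    return 0;
}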
-** -****************************************************************/ -#if 0 -static void -test_reference_compat_conv(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, dset2; /* Dataset ID */ - hid_t group, group2; /* Group ID */ - hid_t sid1, sid2, sid3; /* Dataspace IDs */ - hid_t tid1, tid2; /* Datatype ID */ - hsize_t dims1[] = {SPACE1_DIM1}, dims2[] = {SPACE2_DIM1, SPACE2_DIM2}, - dims3[] = {SPACE1_DIM1}; /* Purposely set dimension larger to test NULL references */ - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ - hobj_ref_t *wbuf_obj = NULL; /* Buffer to write to disk */ - H5R_ref_t *rbuf_obj = NULL; /* Buffer read from disk */ - hdset_reg_ref_t *wbuf_reg = NULL; /* Buffer to write to disk */ - H5R_ref_t *rbuf_reg = NULL; /* Buffer read from disk */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - unsigned int i; /* Counter */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Deprecated Object Reference Functions\n")); - - /* Allocate write & read buffers */ - wbuf_obj = (hobj_ref_t *)calloc(sizeof(hobj_ref_t), SPACE1_DIM1); - rbuf_obj = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - wbuf_reg = calloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); - rbuf_reg = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_COMPAT, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create another dataspace for datasets */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create another dataspace for datasets */ - sid3 = H5Screate_simple(SPACE1_RANK, dims3, NULL); - CHECK(sid3, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a datatype to refer to */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, 
"H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create a dataset with object reference datatype */ - dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Create reference to dataset */ - ret = H5Rcreate(&wbuf_obj[0], fid1, "/Group1/Dataset1", H5R_OBJECT, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - - /* Create reference to dataset */ - ret = H5Rcreate(&wbuf_obj[1], fid1, "/Group1/Dataset2", H5R_OBJECT, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - - /* Create reference to group */ - ret = H5Rcreate(&wbuf_obj[2], fid1, "/Group1", H5R_OBJECT, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - - /* Create reference to named datatype */ - ret = H5Rcreate(&wbuf_obj[3], fid1, "/Group1/Datatype1", H5R_OBJECT, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - - /* Write references to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_obj); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a dataset with region reference datatype */ - dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_DSETREG, sid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Select 6x6 hyperslab for first reference */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 6; - block[1] = 6; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create first dataset region */ - ret = H5Rcreate(&wbuf_reg[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION, sid2); - CHECK(ret, FAIL, "H5Rcreate"); - - /* Select sequence of ten points for second reference */ - coord1[0][0] = 6; - coord1[0][1] = 9; - coord1[1][0] = 2; - coord1[1][1] = 2; - coord1[2][0] = 8; - coord1[2][1] = 4; - coord1[3][0] = 1; - coord1[3][1] = 6; - coord1[4][0] = 2; - coord1[4][1] = 8; - coord1[5][0] = 3; - coord1[5][1] = 2; - coord1[6][0] = 0; - coord1[6][1] = 4; - coord1[7][0] = 9; - coord1[7][1] = 0; - coord1[8][0] = 7; - coord1[8][1] = 1; - coord1[9][0] = 3; - coord1[9][1] = 3; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create second dataset region */ - ret = H5Rcreate(&wbuf_reg[1], fid1, "/Group1/Dataset2", H5R_DATASET_REGION, sid2); - CHECK(ret, FAIL, "H5Rcreate"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close disk dataspaces */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_COMPAT, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the object reference dataset */ - dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_obj); - CHECK(ret, FAIL, "H5Dread"); - - /* 
Verify type of objects pointed at */ - ret = H5Rget_obj_type3(&rbuf_obj[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - ret = H5Rget_obj_type3(&rbuf_obj[1], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - ret = H5Rget_obj_type3(&rbuf_obj[2], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3"); - - ret = H5Rget_obj_type3(&rbuf_obj[3], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3"); - - /* Make sure the referenced objects can be opened */ - dset2 = H5Ropen_object(&rbuf_obj[0], H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - dset2 = H5Ropen_object(&rbuf_obj[1], H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - group2 = H5Ropen_object(&rbuf_obj[2], H5P_DEFAULT, H5P_DEFAULT); - CHECK(group2, H5I_INVALID_HID, "H5Ropen_object"); - - ret = H5Gclose(group2); - CHECK(ret, FAIL, "H5Gclose"); - - tid2 = H5Ropen_object(&rbuf_obj[3], H5P_DEFAULT, H5P_DEFAULT); - CHECK(tid2, H5I_INVALID_HID, "H5Ropen_object"); - - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open the dataset region reference dataset */ - dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify type of objects pointed at */ - ret = H5Rget_obj_type3(&rbuf_reg[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - ret = H5Rget_obj_type3(&rbuf_reg[1], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - /* Make sure the referenced objects can be opened */ - dset2 = H5Ropen_object(&rbuf_reg[0], H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - dset2 = H5Ropen_object(&rbuf_reg[1], H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - for (i = 0; i < dims1[0]; i++) { - ret = H5Rdestroy(&rbuf_obj[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - for (i = 0; i < dims3[0]; i++) { - ret = H5Rdestroy(&rbuf_reg[i]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - - /* Free memory buffers */ - free(wbuf_obj); - free(rbuf_obj); - free(wbuf_reg); - free(rbuf_reg); -} /* test_reference_compat() */ -#endif - -/**************************************************************** -** -** test_reference_perf(): Test basic H5R (reference) object reference -** performance. 
-** -****************************************************************/ -static void -test_reference_perf(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, /* Dataset ID */ - dset2; /* Dereferenced dataset ID */ - hid_t group; /* Group ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hsize_t dims1[] = {1}; - hid_t dapl_id; /* Dataset access property list */ - H5R_ref_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temp. buffer read from disk */ - H5R_ref_t *wbuf_reg, /* buffer to write to disk */ - *rbuf_reg; /* buffer read from disk */ - hobj_ref_t *wbuf_deprec, /* deprecated references */ - *rbuf_deprec; /* deprecated references */ - hdset_reg_ref_t *wbuf_reg_deprec, /* deprecated references*/ - *rbuf_reg_deprec; /* deprecated references*/ - unsigned *ibuf, *obuf; - unsigned i, j; /* Counters */ - H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - double t1, t2, t; /* Timers */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Object Reference Performance\n")); - - /* Allocate write & read buffers */ - wbuf = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - obuf = calloc(sizeof(unsigned), SPACE1_DIM1); - ibuf = calloc(sizeof(unsigned), SPACE1_DIM1); - wbuf_deprec = (hobj_ref_t *)calloc(sizeof(hobj_ref_t), SPACE1_DIM1); - rbuf = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - rbuf_deprec = (hobj_ref_t *)calloc(sizeof(hobj_ref_t), SPACE1_DIM1); - tbuf = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - wbuf_reg = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - rbuf_reg = (H5R_ref_t *)calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - wbuf_reg_deprec = (hdset_reg_ref_t *)calloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); - rbuf_reg_deprec = (hdset_reg_ref_t *)calloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); - - for (i = 0; i < SPACE1_DIM1; i++) - obuf[i] = i * 3; - - /* Create file */ - fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); - - /* Create dataset access property list */ - dapl_id = H5Pcreate(H5P_DATASET_ACCESS); - CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate"); - - /* Create a group */ - group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - - /* Create a dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset (inside Group1) */ - dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a datatype to refer to */ - tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(tid1, H5I_INVALID_HID, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret 
= H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Save datatype for later */ - ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close group */ - ret = H5Gclose(group); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - t = 0; - for (i = 0; i < MAX_ITER_CREATE; i++) { - t1 = H5_get_time(); - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_object"); - t2 = H5_get_time(); - t += t2 - t1; - ret = H5Rdestroy(&wbuf[0]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - if (VERBOSE_MED) - printf("--- Object reference create time: %lfs\n", t / MAX_ITER_CREATE); - - /* Create reference to dataset */ - ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]); - CHECK(ret, FAIL, "H5Rcreate_object"); - ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type); - CHECK(ret, FAIL, "H5Rget_obj_type3"); - VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3"); - - t = 0; - for (i = 0; i < MAX_ITER_WRITE; i++) { - t1 = H5_get_time(); - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Object reference write time: %lfs\n", t / MAX_ITER_WRITE); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - t = 0; - for (i = 0; i < MAX_ITER_CREATE; i++) { - t1 = H5_get_time(); - ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated object reference create time: %lfs\n", t / MAX_ITER_CREATE); - - /* Create reference to dataset */ - ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - - t = 0; - for (i = 0; i < MAX_ITER_WRITE; i++) { - t1 = H5_get_time(); - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_deprec); - CHECK(ret, FAIL, "H5Dwrite"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated object reference write time: %lfs\n", t / MAX_ITER_WRITE); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#endif - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset5", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - t = 0; - for (i = 0; i < MAX_ITER_CREATE; i++) { - t1 = H5_get_time(); - /* Store first dataset region */ - ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf_reg[0]); - CHECK(ret, FAIL, "H5Rcreate_region"); - t2 = H5_get_time(); - t += t2 - t1; - ret = H5Rdestroy(&wbuf_reg[0]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - if (VERBOSE_MED) - printf("--- Region reference create time: %lfs\n", t / MAX_ITER_CREATE); - - /* Store first dataset region */ - ret = H5Rcreate_region(fid1, 
"/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf_reg[0]); - CHECK(ret, FAIL, "H5Rcreate_region"); - - t = 0; - for (i = 0; i < MAX_ITER_WRITE; i++) { - t1 = H5_get_time(); - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg); - CHECK(ret, FAIL, "H5Dwrite"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Region reference write time: %lfs\n", t / MAX_ITER_WRITE); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset6", H5T_STD_REF_DSETREG, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - t = 0; - for (i = 0; i < MAX_ITER_CREATE; i++) { - t1 = H5_get_time(); - /* Store first dataset region */ - ret = H5Rcreate(&wbuf_reg_deprec[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION1, sid1); - CHECK(ret, FAIL, "H5Rcreate"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated region reference create time: %lfs\n", t / MAX_ITER_CREATE); - - t = 0; - for (i = 0; i < MAX_ITER_WRITE; i++) { - t1 = H5_get_time(); - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg_deprec); - CHECK(ret, FAIL, "H5Dwrite"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated region reference write time: %lfs\n", t / MAX_ITER_WRITE); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#endif - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open the file */ - fid1 = H5Fopen(FILE_REF_OBJ, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - t = 0; - for (i = 0; i < MAX_ITER_READ; i++) { - t1 = H5_get_time(); - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - t2 = H5_get_time(); - t += t2 - t1; - ret = H5Rdestroy(&rbuf[0]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - if (VERBOSE_MED) - printf("--- Object reference read time: %lfs\n", t / MAX_ITER_READ); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Open dataset object */ - dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); - CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object"); - - /* Check information in referenced dataset */ - sid1 = H5Dget_space(dset2); - CHECK(sid1, H5I_INVALID_HID, "H5Dget_space"); - - ret = (int)H5Sget_simple_extent_npoints(sid1); - VERIFY(ret, dims1[0], "H5Sget_simple_extent_npoints"); - - /* Read from disk */ - ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); - CHECK(ret, FAIL, "H5Dread"); - - for (i = 0; i < dims1[0]; i++) - VERIFY(ibuf[i], i * 3, "Data"); - - /* Close dereferenced Dataset */ - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - t = 0; - for (i = 0; i < MAX_ITER_READ; i++) { - t1 = H5_get_time(); - /* Read selection from disk */ - ret = 
H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_deprec); - CHECK(ret, FAIL, "H5Dread"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated object reference read time: %lfs\n", t / MAX_ITER_READ); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#endif - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset5", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - t = 0; - for (i = 0; i < MAX_ITER_READ; i++) { - t1 = H5_get_time(); - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg); - CHECK(ret, FAIL, "H5Dread"); - t2 = H5_get_time(); - t += t2 - t1; - ret = H5Rdestroy(&rbuf_reg[0]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - if (VERBOSE_MED) - printf("--- Region reference read time: %lfs\n", t / MAX_ITER_READ); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg); - CHECK(ret, FAIL, "H5Dread"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#if 0 - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset6", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); - - t = 0; - for (i = 0; i < MAX_ITER_READ; i++) { - t1 = H5_get_time(); - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg_deprec); - CHECK(ret, FAIL, "H5Dread"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated region reference read time: %lfs\n", t / MAX_ITER_READ); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); -#endif - /* Close dataset access property list */ - ret = H5Pclose(dapl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Destroy references */ - for (j = 0; j < dims1[0]; j++) { - ret = H5Rdestroy(&wbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&wbuf_reg[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&rbuf[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - ret = H5Rdestroy(&rbuf_reg[j]); - CHECK(ret, FAIL, "H5Rdestroy"); - } - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(wbuf_reg); - free(rbuf_reg); - free(wbuf_deprec); - free(rbuf_deprec); - free(wbuf_reg_deprec); - free(rbuf_reg_deprec); - free(tbuf); - free(ibuf); - free(obuf); -} /* test_reference_perf() */ - -/**************************************************************** -** -** test_reference(): Main H5R reference testing routine. 
-** -****************************************************************/ -void -test_reference(void) -{ - H5F_libver_t low, high; /* Low and high bounds */ - const char *env_h5_drvr; /* File Driver value from environment */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing References\n")); - - /* Get the VFD to use */ - env_h5_drvr = getenv(HDF5_DRIVER); - if (env_h5_drvr == NULL) - env_h5_drvr = "nomatch"; - - test_reference_params(); /* Test for correct parameter checking */ - test_reference_obj(); /* Test basic H5R object reference code */ - test_reference_vlen_obj(); /* Test reference within vlen */ - test_reference_cmpnd_obj(); /* Test reference within compound type */ - - /* Loop through all the combinations of low/high version bounds */ - for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) { - for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) { - - /* Invalid combinations, just continue */ - if (high == H5F_LIBVER_EARLIEST || high < low) - continue; - - test_reference_region(low, high); /* Test basic H5R dataset region reference code */ - test_reference_region_1D(low, high); /* Test H5R dataset region reference code for 1-D datasets */ - - } /* end high bound */ - } /* end low bound */ - - /* The following test is currently broken with the Direct VFD */ - if (strcmp(env_h5_drvr, "direct") != 0) { - test_reference_obj_deleted(); /* Test H5R object reference code for deleted objects */ - } - - test_reference_group(); /* Test operations on dereferenced groups */ - test_reference_attr(); /* Test attribute references */ - test_reference_external(); /* Test external references */ -#if 0 - test_reference_compat_conv(); /* Test operations with old types */ -#endif - - test_reference_perf(); - -} /* test_reference() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_reference - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_reference(void) -{ - H5Fdelete(FILE_REF_PARAM, H5P_DEFAULT); - H5Fdelete(FILE_REF_OBJ, H5P_DEFAULT); - H5Fdelete(FILE_REF_VL_OBJ, H5P_DEFAULT); - H5Fdelete(FILE_REF_CMPND_OBJ, H5P_DEFAULT); - H5Fdelete(FILE_REF_REG, H5P_DEFAULT); - H5Fdelete(FILE_REF_REG_1D, H5P_DEFAULT); - H5Fdelete(FILE_REF_OBJ_DEL, H5P_DEFAULT); - H5Fdelete(FILE_REF_GRP, H5P_DEFAULT); - H5Fdelete(FILE_REF_ATTR, H5P_DEFAULT); - H5Fdelete(FILE_REF_EXT1, H5P_DEFAULT); - H5Fdelete(FILE_REF_EXT2, H5P_DEFAULT); - H5Fdelete(FILE_REF_COMPAT, H5P_DEFAULT); -} diff --git a/test/API/tselect.c b/test/API/tselect.c deleted file mode 100644 index 9d398be7916..00000000000 --- a/test/API/tselect.c +++ /dev/null @@ -1,16308 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tselect - * - * Test the Dataspace selection functionality - * - *************************************************************/ - -#define H5S_FRIEND /*suppress error about including H5Spkg */ - -/* Define this macro to indicate that the testing APIs should be available */ -#define H5S_TESTING - -#include "testhdf5.h" -#include "hdf5.h" -/* #include "H5Spkg.h" */ /* Dataspaces */ - -#define FILENAME "tselect.h5" - -/* 3-D dataset with fixed dimensions */ -#define SPACE1_NAME "Space1" -#define SPACE1_RANK 3 -#define SPACE1_DIM1 3 -#define SPACE1_DIM2 15 -#define SPACE1_DIM3 13 - -/* 2-D dataset with fixed dimensions */ -#define SPACE2_NAME "Space2" -#define SPACE2_RANK 2 -#define SPACE2_DIM1 30 -#define SPACE2_DIM2 26 -#define SPACE2A_RANK 1 -#define SPACE2A_DIM1 (SPACE2_DIM1 * SPACE2_DIM2) - -/* 2-D dataset with fixed dimensions */ -#define SPACE3_NAME "Space3" -#define SPACE3_RANK 2 -#define SPACE3_DIM1 15 -#define SPACE3_DIM2 26 - -/* 3-D dataset with fixed dimensions */ -#define SPACE4_NAME "Space4" -#define SPACE4_RANK 3 -#define SPACE4_DIM1 11 -#define SPACE4_DIM2 13 -#define SPACE4_DIM3 17 - -/* Number of random hyperslabs to test */ -#define NHYPERSLABS 10 - -/* Number of random hyperslab tests performed */ -#define NRAND_HYPER 100 - -/* 5-D dataset with fixed dimensions */ -#define SPACE5_NAME "Space5" -#define SPACE5_RANK 5 -#define SPACE5_DIM1 10 -#define SPACE5_DIM2 10 -#define SPACE5_DIM3 10 -#define SPACE5_DIM4 10 -#define SPACE5_DIM5 10 - -/* 1-D dataset with same size as 5-D dataset */ -#define SPACE6_RANK 1 -#define SPACE6_DIM1 (SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5) - -/* 2-D dataset with easy dimension sizes */ -#define SPACE7_NAME "Space7" -#define SPACE7_RANK 2 -#define SPACE7_DIM1 10 -#define SPACE7_DIM2 10 -#define SPACE7_FILL 254 -#define SPACE7_CHUNK_DIM1 5 -#define SPACE7_CHUNK_DIM2 5 -#define SPACE7_NPOINTS 8 - -/* 4-D dataset with fixed dimensions */ -#define SPACE8_NAME "Space8" -#define SPACE8_RANK 4 -#define SPACE8_DIM1 11 -#define SPACE8_DIM2 13 -#define SPACE8_DIM3 17 -#define SPACE8_DIM4 19 - -/* Another 2-D dataset with easy dimension sizes */ -#define SPACE9_RANK 2 -#define SPACE9_DIM1 12 -#define SPACE9_DIM2 12 - -/* Element selection information */ -#define POINT1_NPOINTS 10 - -/* Chunked dataset information */ -#define DATASETNAME "ChunkArray" -#define NX_SUB 87 /* hyperslab dimensions */ -#define NY_SUB 61 -#define NZ_SUB 181 -#define NX 87 /* output buffer dimensions */ -#define NY 61 -#define NZ 181 -#define RANK_F 3 /* File dataspace rank */ -#define RANK_M 3 /* Memory dataspace rank */ -#define X 87 /* dataset dimensions */ -#define Y 61 -#define Z 181 -#define CHUNK_X 87 /* chunk dimensions */ -#define CHUNK_Y 61 -#define CHUNK_Z 181 - -/* Basic chunk size */ -#define SPACE10_DIM1 180 -#define SPACE10_CHUNK_SIZE 12 - -/* Information for bounds checking test */ -#define SPACE11_RANK 2 -#define SPACE11_DIM1 100 -#define SPACE11_DIM2 100 -#define SPACE11_NPOINTS 4 - -/* Information for offsets w/chunks test #2 */ -#define SPACE12_RANK 1 -#define SPACE12_DIM0 25 -#define SPACE12_CHUNK_DIM0 5 - -/* Information for Space rebuild test */ -#define SPACERE1_RANK 1 -#define SPACERE1_DIM0 20 -#define SPACERE2_RANK 2 -#define SPACERE2_DIM0 8 -#define SPACERE2_DIM1 12 -#define SPACERE3_RANK 3 -#define SPACERE3_DIM0 8 -#define SPACERE3_DIM1 12 -#define 
SPACERE3_DIM2 8 -#define SPACERE4_RANK 4 -#define SPACERE4_DIM0 8 -#define SPACERE4_DIM1 12 -#define SPACERE4_DIM2 8 -#define SPACERE4_DIM3 12 -#define SPACERE5_RANK 5 -#define SPACERE5_DIM0 8 -#define SPACERE5_DIM1 12 -#define SPACERE5_DIM2 8 -#define SPACERE5_DIM3 12 -#define SPACERE5_DIM4 8 - -/* Information for Space update diminfo test */ -#define SPACEUD1_DIM0 20 -#define SPACEUD3_DIM0 9 -#define SPACEUD3_DIM1 12 -#define SPACEUD3_DIM2 13 - -/* #defines for shape same / different rank tests */ -#define SS_DR_MAX_RANK 5 - -/* Information for regular hyperslab query test */ -#define SPACE13_RANK 3 -#define SPACE13_DIM1 50 -#define SPACE13_DIM2 50 -#define SPACE13_DIM3 50 -#define SPACE13_NPOINTS 4 - -/* Information for testing selection iterators */ -#define SEL_ITER_MAX_SEQ 256 - -/* Defines for test_hyper_io_1d() */ -#define DNAME "DSET_1D" -#define RANK 1 -#define NUMCHUNKS 3 -#define CHUNKSZ 20 -#define NUM_ELEMENTS NUMCHUNKS *CHUNKSZ - -/* Location comparison function */ -static int compare_size_t(const void *s1, const void *s2); - -static herr_t test_select_hyper_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, - void *operator_data); -static herr_t test_select_point_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, - void *operator_data); -static herr_t test_select_all_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, - void *operator_data); -static herr_t test_select_none_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, - void *operator_data); -static herr_t test_select_hyper_iter2(void *_elem, hid_t type_id, unsigned ndim, const hsize_t *point, - void *_operator_data); -static herr_t test_select_hyper_iter3(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, - void *operator_data); - -/**************************************************************** -** -** test_select_hyper_iter1(): Iterator for checking hyperslab iteration -** -****************************************************************/ -static herr_t -test_select_hyper_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim, - const hsize_t H5_ATTR_UNUSED *point, void *_operator_data) -{ - uint8_t *tbuf = (uint8_t *)_elem, /* temporary buffer pointer */ - **tbuf2 = (uint8_t **)_operator_data; /* temporary buffer handle */ - - if (*tbuf != **tbuf2) - return (-1); - else { - (*tbuf2)++; - return (0); - } -} /* end test_select_hyper_iter1() */ - -/**************************************************************** -** -** test_select_hyper(): Test basic H5S (dataspace) selection code. -** Tests hyperslabs of various sizes and dimensionalities. 
-** -****************************************************************/ -static void -test_select_hyper(hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - H5S_class_t ext_type; /* Extent type */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Verify extent type */ - ext_type = H5Sget_simple_extent_type(sid1); - VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Test selecting stride==0 to verify failure */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 0; - stride[1] = 0; - stride[2] = 0; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - H5E_BEGIN_TRY - { - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - - /* Test selecting stridebuf + (pnt_info->coord[pnt_info->offset][0] * SPACE2_DIM2) + - pnt_info->coord[pnt_info->offset][1]; - if (*elem != *tmp) - return (-1); - else { - pnt_info->offset++; - return (0); - } -} /* end test_select_point_iter1() */ - -/**************************************************************** -** -** test_select_point(): Test basic H5S (dataspace) selection code. -** Tests element selections between dataspaces of various sizes -** and dimensionalities. 
-** -****************************************************************/ -static void -test_select_point(hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ - hsize_t temp_coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ - hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ - hsize_t temp_coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ - hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ - hsize_t temp_coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - struct pnt_iter pi; /* Custom Pointer iterator struct */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Element Selection Functions\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for write buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for disk dataset */ - coord1[0][0] = 0; - coord1[0][1] = 10; - coord1[0][2] = 5; - coord1[1][0] = 1; - coord1[1][1] = 2; - coord1[1][2] = 7; - coord1[2][0] = 2; - coord1[2][1] = 4; - coord1[2][2] = 9; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[3][2] = 11; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[4][2] = 13; - coord1[5][0] = 2; - coord1[5][1] = 12; - coord1[5][2] = 0; - coord1[6][0] = 0; - coord1[6][1] = 14; - coord1[6][2] = 2; - coord1[7][0] = 1; - coord1[7][1] = 0; - coord1[7][2] = 4; - coord1[8][0] = 2; - coord1[8][1] = 1; - coord1[8][2] = 6; - coord1[9][0] = 0; - coord1[9][1] = 3; - coord1[9][2] = 8; - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify correct elements selected */ - H5Sget_select_elem_pointlist(sid1, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord1); - for (i = 0; i < POINT1_NPOINTS; i++) { - VERIFY(temp_coord1[i][0], coord1[i][0], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord1[i][1], coord1[i][1], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord1[i][2], coord1[i][2], "H5Sget_select_elem_pointlist"); - } /* end for */ - - ret = (int)H5Sget_select_npoints(sid1); - VERIFY(ret, 10, "H5Sget_select_npoints"); - - /* Append another sequence of ten points to disk dataset */ - coord1[0][0] = 0; - coord1[0][1] = 2; - coord1[0][2] = 0; - coord1[1][0] = 1; - 
coord1[1][1] = 10; - coord1[1][2] = 8; - coord1[2][0] = 2; - coord1[2][1] = 8; - coord1[2][2] = 10; - coord1[3][0] = 0; - coord1[3][1] = 7; - coord1[3][2] = 12; - coord1[4][0] = 1; - coord1[4][1] = 3; - coord1[4][2] = 11; - coord1[5][0] = 2; - coord1[5][1] = 1; - coord1[5][2] = 1; - coord1[6][0] = 0; - coord1[6][1] = 13; - coord1[6][2] = 7; - coord1[7][0] = 1; - coord1[7][1] = 14; - coord1[7][2] = 6; - coord1[8][0] = 2; - coord1[8][1] = 2; - coord1[8][2] = 5; - coord1[9][0] = 0; - coord1[9][1] = 6; - coord1[9][2] = 13; - ret = H5Sselect_elements(sid1, H5S_SELECT_APPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify correct elements selected */ - H5Sget_select_elem_pointlist(sid1, (hsize_t)POINT1_NPOINTS, (hsize_t)POINT1_NPOINTS, - (hsize_t *)temp_coord1); - for (i = 0; i < POINT1_NPOINTS; i++) { - VERIFY(temp_coord1[i][0], coord1[i][0], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord1[i][1], coord1[i][1], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord1[i][2], coord1[i][2], "H5Sget_select_elem_pointlist"); - } /* end for */ - - ret = (int)H5Sget_select_npoints(sid1); - VERIFY(ret, 20, "H5Sget_select_npoints"); - - /* Select sequence of ten points for memory dataset */ - coord2[0][0] = 12; - coord2[0][1] = 3; - coord2[1][0] = 15; - coord2[1][1] = 13; - coord2[2][0] = 7; - coord2[2][1] = 25; - coord2[3][0] = 0; - coord2[3][1] = 6; - coord2[4][0] = 13; - coord2[4][1] = 0; - coord2[5][0] = 24; - coord2[5][1] = 11; - coord2[6][0] = 12; - coord2[6][1] = 21; - coord2[7][0] = 29; - coord2[7][1] = 4; - coord2[8][0] = 8; - coord2[8][1] = 8; - coord2[9][0] = 19; - coord2[9][1] = 17; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify correct elements selected */ - H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord2); - for (i = 0; i < POINT1_NPOINTS; i++) { - VERIFY(temp_coord2[i][0], coord2[i][0], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord2[i][1], coord2[i][1], "H5Sget_select_elem_pointlist"); - } /* end for */ - - /* Save points for later iteration */ - /* (these are in the second half of the buffer, because we are prepending */ - /* the next list of points to the beginning of the point selection list) */ - memcpy(((char *)pi.coord) + sizeof(coord2), coord2, sizeof(coord2)); - - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, 10, "H5Sget_select_npoints"); - - /* Append another sequence of ten points to memory dataset */ - coord2[0][0] = 24; - coord2[0][1] = 0; - coord2[1][0] = 2; - coord2[1][1] = 25; - coord2[2][0] = 13; - coord2[2][1] = 17; - coord2[3][0] = 8; - coord2[3][1] = 3; - coord2[4][0] = 29; - coord2[4][1] = 4; - coord2[5][0] = 11; - coord2[5][1] = 14; - coord2[6][0] = 5; - coord2[6][1] = 22; - coord2[7][0] = 12; - coord2[7][1] = 2; - coord2[8][0] = 21; - coord2[8][1] = 12; - coord2[9][0] = 9; - coord2[9][1] = 18; - ret = H5Sselect_elements(sid2, H5S_SELECT_PREPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify correct elements selected */ - H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord2); - for (i = 0; i < POINT1_NPOINTS; i++) { - VERIFY(temp_coord2[i][0], coord2[i][0], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord2[i][1], coord2[i][1], "H5Sget_select_elem_pointlist"); - } /* end for */ - - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, 20, 
"H5Sget_select_npoints"); - - /* Save points for later iteration */ - memcpy(pi.coord, coord2, sizeof(coord2)); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of points for read dataset */ - coord3[0][0] = 0; - coord3[0][1] = 2; - coord3[1][0] = 4; - coord3[1][1] = 8; - coord3[2][0] = 13; - coord3[2][1] = 13; - coord3[3][0] = 14; - coord3[3][1] = 20; - coord3[4][0] = 7; - coord3[4][1] = 9; - coord3[5][0] = 2; - coord3[5][1] = 0; - coord3[6][0] = 9; - coord3[6][1] = 19; - coord3[7][0] = 1; - coord3[7][1] = 22; - coord3[8][0] = 12; - coord3[8][1] = 21; - coord3[9][0] = 11; - coord3[9][1] = 6; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify correct elements selected */ - H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord3); - for (i = 0; i < POINT1_NPOINTS; i++) { - VERIFY(temp_coord3[i][0], coord3[i][0], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord3[i][1], coord3[i][1], "H5Sget_select_elem_pointlist"); - } /* end for */ - - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, 10, "H5Sget_select_npoints"); - - /* Append another sequence of ten points to disk dataset */ - coord3[0][0] = 14; - coord3[0][1] = 25; - coord3[1][0] = 0; - coord3[1][1] = 0; - coord3[2][0] = 11; - coord3[2][1] = 11; - coord3[3][0] = 5; - coord3[3][1] = 14; - coord3[4][0] = 3; - coord3[4][1] = 5; - coord3[5][0] = 2; - coord3[5][1] = 2; - coord3[6][0] = 7; - coord3[6][1] = 13; - coord3[7][0] = 9; - coord3[7][1] = 16; - coord3[8][0] = 12; - coord3[8][1] = 22; - coord3[9][0] = 13; - coord3[9][1] = 9; - ret = H5Sselect_elements(sid2, H5S_SELECT_APPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Verify correct elements selected */ - H5Sget_select_elem_pointlist(sid2, (hsize_t)POINT1_NPOINTS, (hsize_t)POINT1_NPOINTS, - (hsize_t *)temp_coord3); - for (i = 0; i < POINT1_NPOINTS; i++) { - VERIFY(temp_coord3[i][0], coord3[i][0], "H5Sget_select_elem_pointlist"); - VERIFY(temp_coord3[i][1], coord3[i][1], "H5Sget_select_elem_pointlist"); - } /* end for */ - ret = (int)H5Sget_select_npoints(sid2); - VERIFY(ret, 20, "H5Sget_select_npoints"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the values match with a dataset iterator */ - pi.buf = wbuf; - pi.offset = 0; - ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_point_iter1, &pi); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_point() */ - 
-/**************************************************************** -** -** test_select_all_iter1(): Iterator for checking all iteration -** -** -****************************************************************/ -static herr_t -test_select_all_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim, - const hsize_t H5_ATTR_UNUSED *point, void *_operator_data) -{ - uint8_t *tbuf = (uint8_t *)_elem, /* temporary buffer pointer */ - **tbuf2 = (uint8_t **)_operator_data; /* temporary buffer handle */ - - if (*tbuf != **tbuf2) - return (-1); - else { - (*tbuf2)++; - return (0); - } -} /* end test_select_all_iter1() */ - -/**************************************************************** -** -** test_select_none_iter1(): Iterator for checking none iteration -** (This is never supposed to be called, so it always returns -1) -** -****************************************************************/ -static herr_t -test_select_none_iter1(void H5_ATTR_UNUSED *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim, - const hsize_t H5_ATTR_UNUSED *point, void H5_ATTR_UNUSED *_operator_data) -{ - return (-1); -} /* end test_select_none_iter1() */ - -/**************************************************************** -** -** test_select_all(): Test basic H5S (dataspace) selection code. -** Tests "all" selections. -** -****************************************************************/ -static void -test_select_all(hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hsize_t dims1[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3}; - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j, k; /* Counters */ - herr_t ret; /* Generic return value */ - H5S_class_t ext_type; /* Extent type */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 'All' Selection Functions\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE4_DIM1; i++) - for (j = 0; j < SPACE4_DIM2; j++) - for (k = 0; k < SPACE4_DIM3; k++) - *tbuf++ = (uint8_t)((((i * SPACE4_DIM2) + j) * SPACE4_DIM3) + k); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE4_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Verify extent type */ - ext_type = H5Sget_simple_extent_type(sid1); - VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the values match with a dataset iterator */ - tbuf = wbuf; - ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid1, test_select_all_iter1, &tbuf); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close disk dataspace 
*/ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_all() */ - -/**************************************************************** -** -** test_select_all_hyper(): Test basic H5S (dataspace) selection code. -** Tests "all" and hyperslab selections. -** -****************************************************************/ -static void -test_select_all_hyper(hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - H5S_class_t ext_type; /* Extent type */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing 'All' Selection Functions\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE3_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Verify extent type */ - ext_type = H5Sget_simple_extent_type(sid1); - VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Select entire 15x26 extent for disk dataset */ - ret = H5Sselect_all(sid1); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Select 15x26 hyperslab for memory dataset */ - start[0] = 15; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 
1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select no extent for disk dataset */ - ret = H5Sselect_none(sid1); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Read selection from disk (should fail with no selection defined) */ - H5E_BEGIN_TRY - { - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dread"); - - /* Select entire 15x26 extent for disk dataset */ - ret = H5Sselect_all(sid1); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Read selection from disk (should work now) */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the values match with a dataset iterator */ - tbuf = wbuf + (15 * SPACE2_DIM2); - ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_all_iter1, &tbuf); - CHECK(ret, FAIL, "H5Diterate"); - - /* A quick check to make certain that iterating through a "none" selection works */ - ret = H5Sselect_none(sid2); - CHECK(ret, FAIL, "H5Sselect_none"); - ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_none_iter1, &tbuf); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_all_hyper() */ - -/**************************************************************** -** -** test_select_combo(): Test basic H5S (dataspace) selection code. -** Tests combinations of element and hyperslab selections between -** dataspaces of various sizes and dimensionalities. 
-** -****************************************************************/ -static void -test_select_combo(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Combination of Hyperslab & Element Selection Functions\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for write buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for disk dataset */ - coord1[0][0] = 0; - coord1[0][1] = 10; - coord1[0][2] = 5; - coord1[1][0] = 1; - coord1[1][1] = 2; - coord1[1][2] = 7; - coord1[2][0] = 2; - coord1[2][1] = 4; - coord1[2][2] = 9; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[3][2] = 11; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[4][2] = 13; - coord1[5][0] = 2; - coord1[5][1] = 12; - coord1[5][2] = 0; - coord1[6][0] = 0; - coord1[6][1] = 14; - coord1[6][2] = 2; - coord1[7][0] = 1; - coord1[7][1] = 0; - coord1[7][2] = 4; - coord1[8][0] = 2; - coord1[8][1] = 1; - coord1[8][2] = 6; - coord1[9][0] = 0; - coord1[9][1] = 3; - coord1[9][2] = 8; - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Select 1x10 hyperslab for writing memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - 
/* Select 10x1 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < POINT1_NPOINTS; i++) { - tbuf = wbuf + i; - tbuf2 = rbuf + (i * SPACE3_DIM2); - if (*tbuf != *tbuf2) - TestErrPrintf("element values don't match!, i=%d\n", i); - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_combo() */ - -static int -compare_size_t(const void *s1, const void *s2) -{ - if (*(const size_t *)s1 < *(const size_t *)s2) - return (-1); - else if (*(const size_t *)s1 > *(const size_t *)s2) - return (1); - else - return (0); -} - -/**************************************************************** -** -** test_select_hyper_stride(): Test H5S (dataspace) selection code. -** Tests strided hyperslabs of various sizes and dimensionalities. -** -****************************************************************/ -static void -test_select_hyper_stride(hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - uint16_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - size_t loc1[72] = { - /* Gruesomely ugly way to make certain hyperslab locations are checked correctly */ - 27, 28, 29, 53, 54, 55, 79, 80, 81, /* Block #1 */ - 32, 33, 34, 58, 59, 60, 84, 85, 86, /* Block #2 */ - 157, 158, 159, 183, 184, 185, 209, 210, 211, /* Block #3 */ - 162, 163, 164, 188, 189, 190, 214, 215, 216, /* Block #4 */ - 287, 288, 289, 313, 314, 315, 339, 340, 341, /* Block #5 */ - 292, 293, 294, 318, 319, 320, 344, 345, 346, /* Block #6 */ - 417, 418, 419, 443, 444, 445, 469, 470, 471, /* Block #7 */ - 422, 423, 424, 448, 449, 450, 474, 475, 476, /* Block #8 */ - }; - size_t loc2[72] = { - 0, 1, 2, 26, 27, 28, /* Block #1 */ - 4, 5, 6, 30, 31, 32, /* Block #2 */ - 8, 9, 10, 34, 35, 36, /* Block #3 */ - 12, 13, 14, 38, 39, 40, /* Block #4 */ - 104, 105, 106, 130, 131, 132, /* Block #5 */ - 108, 109, 110, 134, 135, 136, /* Block #6 */ - 112, 113, 114, 138, 139, 140, /* Block #7 */ - 116, 117, 118, 142, 143, 144, /* Block #8 */ - 208, 209, 210, 234, 235, 236, /* Block #9 */ - 212, 213, 214, 238, 239, 240, /* Block #10 */ - 216, 217, 218, 242, 243, 244, /* Block #11 */ - 220, 221, 222, 246, 247, 248, /* Block #12 */ - }; - int i, j; /* Counters */ - herr_t ret; /* 
Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n")); - - /* Allocate write & read buffers */ - wbuf = (uint16_t *)malloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x3x3 count with a stride of 2x4x3 & 1x2x2 block hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - stride[0] = 2; - stride[1] = 4; - stride[2] = 3; - count[0] = 2; - count[1] = 3; - count[2] = 3; - block[0] = 1; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 4x2 count with a stride of 5x5 & 3x3 block hyperslab for memory dataset */ - start[0] = 1; - start[1] = 1; - stride[0] = 5; - stride[1] = 5; - count[0] = 4; - count[1] = 2; - block[0] = 3; - block[1] = 3; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 3x4 count with a stride of 4x4 & 2x3 block hyperslab for memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 4; - stride[1] = 4; - count[0] = 3; - count[1] = 4; - block[0] = 2; - block[1] = 3; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Sort the locations into the proper order */ - qsort(loc1, (size_t)72, sizeof(size_t), compare_size_t); - qsort(loc2, (size_t)72, sizeof(size_t), compare_size_t); - /* Compare data read with data written out */ - for (i = 0; i < 72; i++) { - tbuf = wbuf + loc1[i]; - tbuf2 = rbuf + loc2[i]; - if (*tbuf != *tbuf2) { - printf("%d: hyperslab values don't match!, loc1[%d]=%d, loc2[%d]=%d\n", __LINE__, i, (int)loc1[i], - i, (int)loc2[i]); - printf("wbuf=%p, tbuf=%p, rbuf=%p, tbuf2=%p\n", (void *)wbuf, (void *)tbuf, (void *)rbuf, - (void *)tbuf2); - TestErrPrintf("*tbuf=%u, *tbuf2=%u\n", (unsigned)*tbuf, (unsigned)*tbuf2); - } /* end if */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - 
CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_stride() */ - -/**************************************************************** -** -** test_select_hyper_contig(): Test H5S (dataspace) selection code. -** Tests contiguous hyperslabs of various sizes and dimensionalities. -** -****************************************************************/ -static void -test_select_hyper_contig(hid_t dset_type, hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims2[] = {SPACE2_DIM2, SPACE2_DIM1}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - uint16_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Contiguous Hyperslabs Functionality\n")); - - /* Allocate write & read buffers */ - wbuf = (uint16_t *)malloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 12x10 count with a stride of 1x3 & 3x3 block hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 3; - count[0] = 12; - count[1] = 10; - block[0] = 1; - block[1] = 3; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 4x5 count with a stride of 3x6 & 3x6 block hyperslab for memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 3; - stride[1] = 6; - count[0] = 4; - count[1] = 5; - block[0] = 3; - block[1] = 6; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 6x5 count with a stride of 2x6 & 2x6 block hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 2; - stride[1] = 6; 
- count[0] = 6; - count[1] = 5; - block[0] = 2; - block[1] = 6; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 3x15 count with a stride of 4x2 & 4x2 block hyperslab for memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 4; - stride[1] = 2; - count[0] = 3; - count[1] = 15; - block[0] = 4; - block[1] = 2; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - if (memcmp(rbuf, wbuf, sizeof(uint16_t) * 30 * 12) != 0) - TestErrPrintf("hyperslab values don't match! Line=%d\n", __LINE__); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_contig() */ - -/**************************************************************** -** -** test_select_hyper_contig2(): Test H5S (dataspace) selection code. -** Tests more contiguous hyperslabs of various sizes and dimensionalities. -** -****************************************************************/ -static void -test_select_hyper_contig2(hid_t dset_type, hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims2[] = {SPACE8_DIM4, SPACE8_DIM3, SPACE8_DIM2, SPACE8_DIM1}; - hsize_t start[SPACE8_RANK]; /* Starting location of hyperslab */ - hsize_t count[SPACE8_RANK]; /* Element count of hyperslab */ - uint16_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j, k, l; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing More Contiguous Hyperslabs Functionality\n")); - - /* Allocate write & read buffers */ - wbuf = (uint16_t *)malloc(sizeof(uint16_t) * SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4); - CHECK_PTR(wbuf, "malloc"); - rbuf = - (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE8_DIM1; i++) - for (j = 0; j < SPACE8_DIM2; j++) - for (k = 0; k < SPACE8_DIM3; k++) - for (l = 0; l < SPACE8_DIM4; l++) - *tbuf++ = (uint16_t)((i * SPACE8_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE8_RANK, dims2, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select contiguous hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, 
"H5Sselect_hyperslab"); - - /* Select contiguous hyperslab in memory */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE8_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select contiguous hyperslab in memory */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select contiguous hyperslab in memory */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - if (memcmp(rbuf, wbuf, sizeof(uint16_t) * 2 * SPACE8_DIM3 * SPACE8_DIM2 * SPACE8_DIM1) != 0) - TestErrPrintf("Error: hyperslab values don't match!\n"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_contig2() */ - -/**************************************************************** -** -** test_select_hyper_contig3(): Test H5S (dataspace) selection code. -** Tests contiguous hyperslabs of various sizes and dimensionalities. -** This test uses a hyperslab that is contiguous in the lowest dimension, -** not contiguous in a dimension, then has a selection across the entire next -** dimension (which should be "flattened" out also). 
-** -****************************************************************/ -static void -test_select_hyper_contig3(hid_t dset_type, hid_t xfer_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims2[] = {SPACE8_DIM4, SPACE8_DIM3, SPACE8_DIM2, SPACE8_DIM1}; - hsize_t start[SPACE8_RANK]; /* Starting location of hyperslab */ - hsize_t count[SPACE8_RANK]; /* Element count of hyperslab */ - uint16_t *wbuf, /* Buffer to write to disk */ - *rbuf, /* Buffer read from disk */ - *tbuf, *tbuf2; /* Temporary buffer pointers */ - unsigned i, j, k, l; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Yet More Contiguous Hyperslabs Functionality\n")); - - /* Allocate write & read buffers */ - wbuf = (uint16_t *)malloc(sizeof(uint16_t) * SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4); - CHECK_PTR(wbuf, "malloc"); - rbuf = - (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE8_DIM4; i++) - for (j = 0; j < SPACE8_DIM3; j++) - for (k = 0; k < SPACE8_DIM2; k++) - for (l = 0; l < SPACE8_DIM1; l++) - *tbuf++ = (uint16_t)((k * SPACE8_DIM2) + l); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE8_RANK, dims2, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select semi-contiguous hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - start[2] = SPACE8_DIM2 / 2; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2 / 2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select semi-contiguous hyperslab in memory */ - start[0] = 0; - start[1] = 0; - start[2] = SPACE8_DIM2 / 2; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2 / 2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE8_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select semi-contiguous hyperslab in memory */ - start[0] = 0; - start[1] = 0; - start[2] = SPACE8_DIM2 / 2; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2 / 2; - count[3] = SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select semi-contiguous hyperslab in memory */ - start[0] = 0; - start[1] = 0; - start[2] = SPACE8_DIM2 / 2; - start[3] = 0; - count[0] = 2; - count[1] = SPACE8_DIM3; - count[2] = SPACE8_DIM2 / 2; - count[3] 
= SPACE8_DIM1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0, tbuf = wbuf, tbuf2 = rbuf; i < SPACE8_DIM4; i++) - for (j = 0; j < SPACE8_DIM3; j++) - for (k = 0; k < SPACE8_DIM2; k++) - for (l = 0; l < SPACE8_DIM1; l++, tbuf++, tbuf2++) - if ((i >= start[0] && i < (start[0] + count[0])) && - (j >= start[1] && j < (start[1] + count[1])) && - (k >= start[2] && k < (start[2] + count[2])) && - (l >= start[3] && l < (start[3] + count[3]))) { - if (*tbuf != *tbuf2) { - printf("Error: hyperslab values don't match!\n"); - TestErrPrintf("Line: %d, i=%u, j=%u, k=%u, l=%u, *tbuf=%u,*tbuf2=%u\n", __LINE__, - i, j, k, l, (unsigned)*tbuf, (unsigned)*tbuf2); - } /* end if */ - } /* end if */ - else { - if (*tbuf2 != 0) { - printf("Error: invalid data in read buffer!\n"); - TestErrPrintf("Line: %d, i=%u, j=%u, k=%u, l=%u, *tbuf=%u,*tbuf2=%u\n", __LINE__, - i, j, k, l, (unsigned)*tbuf, (unsigned)*tbuf2); - } /* end if */ - } /* end else */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_contig3() */ - -#if 0 -/**************************************************************** -** -** verify_select_hyper_contig_dr__run_test(): Verify data from -** test_select_hyper_contig_dr__run_test() -** -****************************************************************/ -static void -verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t cube_size, - unsigned edge_size, unsigned cube_rank) -{ - const uint16_t *cube_ptr; /* Pointer into the cube buffer */ - uint16_t expected_value; /* Expected value in dataset */ - unsigned i, j, k, l, m; /* Local index variables */ - size_t s; /* Local index variable */ - bool mis_match; /* Flag to indicate mismatch in expected value */ - - assert(cube_buf); - assert(cube_size > 0); - - expected_value = 0; - mis_match = false; - cube_ptr = cube_buf; - s = 0; - i = 0; - do { - j = 0; - do { - k = 0; - do { - l = 0; - do { - m = 0; - do { - /* Sanity check */ - assert(s < cube_size); - - /* Check for correct value */ - if (*cube_ptr != expected_value) - mis_match = true; - - /* Advance to next element */ - cube_ptr++; - expected_value++; - s++; - m++; - } while ((cube_rank > 0) && (m < edge_size)); - l++; - } while ((cube_rank > 1) && (l < edge_size)); - k++; - } while ((cube_rank > 2) && (k < edge_size)); - j++; - } while ((cube_rank > 3) && (j < edge_size)); - i++; - } while ((cube_rank > 4) && (i < edge_size)); - if (mis_match) - TestErrPrintf("Initial cube data don't match! Line = %d\n", __LINE__); -} /* verify_select_hyper_contig_dr__run_test() */ -#endif -#if 0 - -/**************************************************************** -** -** test_select_hyper_contig_dr__run_test(): Test H5S (dataspace) -** selection code with contiguous source and target having -** different ranks but the same shape. We have already -** tested H5Sselect_shape_same in isolation, so now we try to do -** I/O. 
-** -****************************************************************/ -static void -test_select_hyper_contig_dr__run_test(int test_num, const uint16_t *cube_buf, const uint16_t *zero_buf, - unsigned edge_size, unsigned chunk_edge_size, unsigned small_rank, - unsigned large_rank, hid_t dset_type, hid_t xfer_plist) -{ - bool mis_match; /* Flag indicating a value read in wasn't what was expected */ - hid_t fapl; /* File access property list */ - hid_t fid1; /* File ID */ - hid_t small_cube_sid; /* Dataspace ID for small cube in memory & file */ - hid_t mem_large_cube_sid; /* Dataspace ID for large cube in memory */ - hid_t file_large_cube_sid; /* Dataspace ID for large cube in file */ - hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */ - hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */ - hid_t small_cube_dataset; /* Dataset ID */ - hid_t large_cube_dataset; /* Dataset ID */ - size_t start_index; /* Offset within buffer to begin inspecting */ - size_t stop_index; /* Offset within buffer to end inspecting */ - uint16_t expected_value; /* Expected value in dataset */ - uint16_t *small_cube_buf_1; /* Buffer for small cube data */ - uint16_t *large_cube_buf_1; /* Buffer for large cube data */ - uint16_t *ptr_1; /* Temporary pointer into cube data */ - hsize_t dims[SS_DR_MAX_RANK]; /* Dataspace dimensions */ - hsize_t start[SS_DR_MAX_RANK]; /* Shared hyperslab start offset */ - hsize_t stride[SS_DR_MAX_RANK]; /* Shared hyperslab stride */ - hsize_t count[SS_DR_MAX_RANK]; /* Shared hyperslab count */ - hsize_t block[SS_DR_MAX_RANK]; /* Shared hyperslab block size */ - hsize_t *start_ptr; /* Actual hyperslab start offset */ - hsize_t *stride_ptr; /* Actual hyperslab stride */ - hsize_t *count_ptr; /* Actual hyperslab count */ - hsize_t *block_ptr; /* Actual hyperslab block size */ - size_t small_cube_size; /* Number of elements in small cube */ - size_t large_cube_size; /* Number of elements in large cube */ - unsigned u, v, w, x; /* Local index variables */ - size_t s; /* Local index variable */ - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num)); - MESSAGE(7, ("\tranks = %u/%u, edge_size = %u, chunk_edge_size = %u.\n", small_rank, large_rank, edge_size, - chunk_edge_size)); - - assert(edge_size >= 6); - assert(edge_size >= chunk_edge_size); - assert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); - assert(small_rank > 0); - assert(small_rank < large_rank); - assert(large_rank <= SS_DR_MAX_RANK); - - /* Compute cube sizes */ - small_cube_size = large_cube_size = (size_t)1; - for (u = 0; u < large_rank; u++) { - if (u < small_rank) - small_cube_size *= (size_t)edge_size; - - large_cube_size *= (size_t)edge_size; - } /* end for */ - - assert(large_cube_size < (size_t)UINT_MAX); - - /* set up the start, stride, count, and block pointers */ - start_ptr = &(start[SS_DR_MAX_RANK - large_rank]); - stride_ptr = &(stride[SS_DR_MAX_RANK - large_rank]); - count_ptr = &(count[SS_DR_MAX_RANK - large_rank]); - block_ptr = &(block[SS_DR_MAX_RANK - large_rank]); - - /* Allocate buffers */ - small_cube_buf_1 = (uint16_t *)calloc(sizeof(uint16_t), small_cube_size); - CHECK_PTR(small_cube_buf_1, "calloc"); - large_cube_buf_1 = (uint16_t *)calloc(sizeof(uint16_t), large_cube_size); - CHECK_PTR(large_cube_buf_1, "calloc"); - - /* Create a dataset transfer property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Use 
the 'core' VFD for this test */ - ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), false); - CHECK(ret, FAIL, "H5Pset_fapl_core"); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Close file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* setup dims: */ - dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = (hsize_t)edge_size; - - /* Create small cube dataspaces */ - small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); - CHECK(small_cube_sid, FAIL, "H5Screate_simple"); - - /* Create large cube dataspace */ - mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); - CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple"); - file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); - CHECK(file_large_cube_sid, FAIL, "H5Screate_simple"); - - /* if chunk edge size is greater than zero, set up the small and - * large data set creation property lists to specify chunked - * datasets. - */ - if (chunk_edge_size > 0) { - hsize_t chunk_dims[SS_DR_MAX_RANK]; /* Chunk dimensions */ - - chunk_dims[0] = chunk_dims[1] = chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = - (hsize_t)chunk_edge_size; - - small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate"); - - ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate"); - - ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - } /* end if */ - - /* create the small cube dataset */ - small_cube_dataset = H5Dcreate2(fid1, "small_cube_dataset", dset_type, small_cube_sid, H5P_DEFAULT, - small_cube_dcpl_id, H5P_DEFAULT); - CHECK(small_cube_dataset, FAIL, "H5Dcreate2"); - - /* Close non-default small dataset DCPL */ - if (small_cube_dcpl_id != H5P_DEFAULT) { - ret = H5Pclose(small_cube_dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - } /* end if */ - - /* create the large cube dataset */ - large_cube_dataset = H5Dcreate2(fid1, "large_cube_dataset", dset_type, file_large_cube_sid, H5P_DEFAULT, - large_cube_dcpl_id, H5P_DEFAULT); - CHECK(large_cube_dataset, FAIL, "H5Dcreate2"); - - /* Close non-default large dataset DCPL */ - if (large_cube_dcpl_id != H5P_DEFAULT) { - ret = H5Pclose(large_cube_dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - } /* end if */ - - /* write initial data to the on disk datasets */ - ret = - H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, file_large_cube_sid, xfer_plist, - cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* read initial data from disk and verify that it is as expected. 
*/ - ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, xfer_plist, - small_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the data is valid */ - verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size, edge_size, small_rank); - - ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, file_large_cube_sid, xfer_plist, - large_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the data is valid */ - verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size, edge_size, large_rank); - - /* first, verify that we can read from disk correctly using selections - * of different rank that H5Sselect_shape_same() views as being of the - * same shape. - * - * Start by reading small_rank-D slice from the on disk large cube, and - * verifying that the data read is correct. Verify that H5Sselect_shape_same() - * returns true on the memory and file selections. - */ - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read slices of the large cube. - */ - for (u = 0; u < SS_DR_MAX_RANK; u++) { - start[u] = 0; - stride[u] = 1; - count[u] = 1; - if ((SS_DR_MAX_RANK - u) > small_rank) - block[u] = 1; - else - block[u] = (hsize_t)edge_size; - } /* end for */ - - u = 0; - do { - v = 0; - do { - w = 0; - do { - x = 0; - do { - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - start[0] = (hsize_t)u; - start[1] = (hsize_t)v; - start[2] = (hsize_t)w; - start[3] = (hsize_t)x; - start[4] = (hsize_t)0; - - ret = H5Sselect_hyperslab(file_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, - count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(small_cube_sid, file_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Read selection from disk */ - ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, file_large_cube_sid, - xfer_plist, small_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* verify that expected data is retrieved */ - mis_match = false; - ptr_1 = small_cube_buf_1; - expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + - (w * edge_size * edge_size) + (x * edge_size)); - for (s = 0; s < small_cube_size; s++) { - if (*ptr_1 != expected_value) - mis_match = true; - ptr_1++; - expected_value++; - } /* end for */ - if (mis_match) - TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); - w++; - } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); - v++; - } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); - u++; - } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); - - /* similarly, read the on disk small cube into slices through the in memory - * large cube, and verify that the correct data (and only the correct data) - * is read. - */ - - /* zero out the in-memory large cube */ - memset(large_cube_buf_1, 0, large_cube_size * sizeof(uint16_t)); - - u = 0; - do { - v = 0; - do { - w = 0; - do { - x = 0; - do { - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. 
Thus no - * need for another inner loop. - */ - start[0] = (hsize_t)u; - start[1] = (hsize_t)v; - start[2] = (hsize_t)w; - start[3] = (hsize_t)x; - start[4] = (hsize_t)0; - - ret = H5Sselect_hyperslab(mem_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, - count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(small_cube_sid, mem_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Read selection from disk */ - ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, small_cube_sid, - xfer_plist, large_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* verify that the expected data and only the - * expected data was read. - */ - start_index = (u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + - (x * edge_size); - stop_index = start_index + small_cube_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= large_cube_size); - - mis_match = false; - ptr_1 = large_cube_buf_1; - expected_value = 0; - for (s = 0; s < start_index; s++) { - if (*ptr_1 != 0) - mis_match = true; - ptr_1++; - } /* end for */ - for (; s <= stop_index; s++) { - if (*ptr_1 != expected_value) - mis_match = true; - expected_value++; - ptr_1++; - } /* end for */ - for (; s < large_cube_size; s++) { - if (*ptr_1 != 0) - mis_match = true; - ptr_1++; - } /* end for */ - if (mis_match) - TestErrPrintf("large cube read from small cube has bad data! Line=%u\n", __LINE__); - - /* Zero out the buffer for the next pass */ - memset(large_cube_buf_1 + start_index, 0, small_cube_size * sizeof(uint16_t)); - - x++; - } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); - w++; - } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); - v++; - } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); - u++; - } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); - - /* now we go in the opposite direction, verifying that we can write - * from memory to file using selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Start by writing small_rank D slices from the in memory large cube, to - * the the on disk small cube dataset. After each write, read the small - * cube dataset back from disk, and verify that it contains the expected - * data. Verify that H5Sselect_shape_same() returns true on the - * memory and file selections. - */ - - u = 0; - do { - v = 0; - do { - w = 0; - do { - x = 0; - do { - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - /* zero out the on disk small cube */ - ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, - xfer_plist, zero_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* select the portion of the in memory large cube from which we - * are going to write data. 
- */ - start[0] = (hsize_t)u; - start[1] = (hsize_t)v; - start[2] = (hsize_t)w; - start[3] = (hsize_t)x; - start[4] = (hsize_t)0; - - ret = H5Sselect_hyperslab(mem_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, - count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* verify that H5Sselect_shape_same() reports the in - * memory slice through the cube selection and the - * on disk full small cube selections as having the same shape. - */ - check = H5Sselect_shape_same(small_cube_sid, mem_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* write the slice from the in memory large cube to the on disk small cube */ - ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, small_cube_sid, - xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* read the on disk small cube into memory */ - ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, - xfer_plist, small_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* verify that expected data is retrieved */ - mis_match = false; - ptr_1 = small_cube_buf_1; - expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + - (w * edge_size * edge_size) + (x * edge_size)); - for (s = 0; s < small_cube_size; s++) { - if (*ptr_1 != expected_value) - mis_match = true; - expected_value++; - ptr_1++; - } /* end for */ - if (mis_match) - TestErrPrintf("small cube data don't match! Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); - w++; - } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); - v++; - } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); - u++; - } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); - - /* Now write the contents of the in memory small cube to slices of - * the on disk cube. After each write, read the on disk cube - * into memory, and verify that it contains the expected - * data. Verify that H5Sselect_shape_same() returns true on - * the memory and file selections. - */ - - /* select the entire memory and file cube dataspaces */ - ret = H5Sselect_all(mem_large_cube_sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Sselect_all(file_large_cube_sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - u = 0; - do { - v = 0; - do { - w = 0; - do { - x = 0; - do { - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - /* zero out the on disk cube */ - ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_USHORT, mem_large_cube_sid, - file_large_cube_sid, xfer_plist, zero_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* select the portion of the in memory large cube to which we - * are going to write data. - */ - start[0] = (hsize_t)u; - start[1] = (hsize_t)v; - start[2] = (hsize_t)w; - start[3] = (hsize_t)x; - start[4] = (hsize_t)0; - - ret = H5Sselect_hyperslab(file_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr, - count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* verify that H5Sselect_shape_same() reports the in - * memory full selection of the small cube and the - * on disk slice through the large cube selection - * as having the same shape. 
- */ - check = H5Sselect_shape_same(small_cube_sid, file_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* write the cube from memory to the target slice of the disk cube */ - ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, file_large_cube_sid, - xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* read the on disk cube into memory */ - ret = H5Sselect_all(file_large_cube_sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, - file_large_cube_sid, xfer_plist, large_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* verify that the expected data and only the - * expected data was read. - */ - start_index = (u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + - (x * edge_size); - stop_index = start_index + small_cube_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= large_cube_size); - - mis_match = false; - ptr_1 = large_cube_buf_1; - expected_value = 0; - for (s = 0; s < start_index; s++) { - if (*ptr_1 != 0) - mis_match = true; - ptr_1++; - } /* end for */ - for (; s <= stop_index; s++) { - if (*ptr_1 != expected_value) - mis_match = true; - expected_value++; - ptr_1++; - } /* end for */ - for (; s < large_cube_size; s++) { - if (*ptr_1 != 0) - mis_match = true; - ptr_1++; - } /* end for */ - if (mis_match) - TestErrPrintf("large cube written from small cube has bad data! Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size)); - w++; - } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size)); - v++; - } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size)); - u++; - } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size)); - - /* Close memory dataspaces */ - ret = H5Sclose(small_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(mem_large_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(file_large_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Datasets */ - ret = H5Dclose(small_cube_dataset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Dclose(large_cube_dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(small_cube_buf_1); - free(large_cube_buf_1); - -} /* test_select_hyper_contig_dr__run_test() */ -#endif -#if 0 -/**************************************************************** -** -** test_select_hyper_contig_dr(): Test H5S (dataspace) -** selection code with contiguous source and target having -** different ranks but the same shape. We have already -** tested H5Sselect_shape_same in isolation, so now we try to do -** I/O. -** -****************************************************************/ -static void -test_select_hyper_contig_dr(hid_t dset_type, hid_t xfer_plist) -{ - int test_num = 0; - unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */ - unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */ - unsigned small_rank; /* Current rank of small dataset */ - unsigned large_rank; /* Current rank of large dataset */ - uint16_t *cube_buf; /* Buffer for writing cube data */ - uint16_t *zero_buf; /* Buffer for writing zeroed cube data */ - uint16_t *cube_ptr; /* Temporary pointer into cube data */ - unsigned max_rank = 5; /* Max. rank to use */ - size_t max_cube_size; /* Max. 
number of elements in largest cube */ - size_t s; /* Local index variable */ - unsigned u; /* Local index variable */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Contiguous Hyperslabs With Different Rank I/O Functionality\n")); - - /* Compute max. cube size */ - max_cube_size = (size_t)1; - for (u = 0; u < max_rank; u++) - max_cube_size *= (size_t)edge_size; - - /* Allocate cube buffer for writing values */ - cube_buf = (uint16_t *)malloc(sizeof(uint16_t) * max_cube_size); - CHECK_PTR(cube_buf, "malloc"); - - /* Initialize the cube buffer */ - cube_ptr = cube_buf; - for (s = 0; s < max_cube_size; s++) - *cube_ptr++ = (uint16_t)s; - - /* Allocate cube buffer for zeroing values on disk */ - zero_buf = (uint16_t *)calloc(sizeof(uint16_t), max_cube_size); - CHECK_PTR(zero_buf, "calloc"); - - for (large_rank = 1; large_rank <= max_rank; large_rank++) { - for (small_rank = 1; small_rank < large_rank; small_rank++) { - chunk_edge_size = 0; - test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf, edge_size, chunk_edge_size, - small_rank, large_rank, dset_type, xfer_plist); - test_num++; - - chunk_edge_size = 3; - test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf, edge_size, chunk_edge_size, - small_rank, large_rank, dset_type, xfer_plist); - test_num++; - } /* for loop on small rank */ - } /* for loop on large rank */ - - free(cube_buf); - free(zero_buf); - -} /* test_select_hyper_contig_dr() */ -#endif -/**************************************************************** -** -** test_select_hyper_checker_board_dr__select_checker_board(): -** Given an n-cube dataspace with each edge of length -** edge_size, and a checker_edge_size either select a checker -** board selection of the entire cube(if sel_rank == n), -** or select a checker board selection of a -** sel_rank dimensional slice through n-cube parallel to the -** sel_rank fastest changing indices, with origin (in the -** higher indices) as indicated by the start array. -** -** Note that this function, like all its relatives, is -** hard coded to presume a maximum n-cube rank of 5. -** While this maximum is declared as a constant, increasing -** it will require extensive coding in addition to changing -** the value of the constant. 
-** -** JRM -- 9/9/09 -** -****************************************************************/ -#if 0 -static void -test_select_hyper_checker_board_dr__select_checker_board(hid_t tgt_n_cube_sid, unsigned tgt_n_cube_rank, - unsigned edge_size, unsigned checker_edge_size, - unsigned sel_rank, const hsize_t sel_start[]) -{ - bool first_selection = true; - unsigned n_cube_offset; - unsigned sel_offset; - hsize_t base_count; - hsize_t offset_count; - hsize_t start[SS_DR_MAX_RANK]; /* Offset of hyperslab selection */ - hsize_t stride[SS_DR_MAX_RANK]; /* Stride of hyperslab selection */ - hsize_t count[SS_DR_MAX_RANK]; /* Count of hyperslab selection */ - hsize_t block[SS_DR_MAX_RANK]; /* Block size of hyperslab selection */ - unsigned i, j, k, l, m; /* Local index variable */ - unsigned u; /* Local index variables */ - herr_t ret; /* Generic return value */ - - assert(edge_size >= 6); - assert(0 < checker_edge_size); - assert(checker_edge_size <= edge_size); - assert(0 < sel_rank); - assert(sel_rank <= tgt_n_cube_rank); - assert(tgt_n_cube_rank <= SS_DR_MAX_RANK); - - sel_offset = SS_DR_MAX_RANK - sel_rank; - n_cube_offset = SS_DR_MAX_RANK - tgt_n_cube_rank; - assert(n_cube_offset <= sel_offset); - - /* First, compute the base count (which assumes start == 0 - * for the associated offset) and offset_count (which - * assumes start == checker_edge_size for the associated - * offset). - */ - base_count = edge_size / (checker_edge_size * 2); - if ((edge_size % (checker_edge_size * 2)) > 0) - base_count++; - - offset_count = (edge_size - checker_edge_size) / (checker_edge_size * 2); - if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) - offset_count++; - - /* Now set up the stride and block arrays, and portions of the start - * and count arrays that will not be altered during the selection of - * the checker board. 
- */ - u = 0; - while (u < n_cube_offset) { - /* these values should never be used */ - start[u] = 0; - stride[u] = 0; - count[u] = 0; - block[u] = 0; - - u++; - } /* end while */ - - while (u < sel_offset) { - start[u] = sel_start[u]; - stride[u] = 2 * edge_size; - count[u] = 1; - block[u] = 1; - - u++; - } /* end while */ - - while (u < SS_DR_MAX_RANK) { - stride[u] = 2 * checker_edge_size; - block[u] = checker_edge_size; - - u++; - } /* end while */ - - i = 0; - do { - if (0 >= sel_offset) { - if (i == 0) { - start[0] = 0; - count[0] = base_count; - } /* end if */ - else { - start[0] = checker_edge_size; - count[0] = offset_count; - } /* end else */ - } /* end if */ - - j = 0; - do { - if (1 >= sel_offset) { - if (j == 0) { - start[1] = 0; - count[1] = base_count; - } /* end if */ - else { - start[1] = checker_edge_size; - count[1] = offset_count; - } /* end else */ - } /* end if */ - - k = 0; - do { - if (2 >= sel_offset) { - if (k == 0) { - start[2] = 0; - count[2] = base_count; - } /* end if */ - else { - start[2] = checker_edge_size; - count[2] = offset_count; - } /* end else */ - } /* end if */ - - l = 0; - do { - if (3 >= sel_offset) { - if (l == 0) { - start[3] = 0; - count[3] = base_count; - } /* end if */ - else { - start[3] = checker_edge_size; - count[3] = offset_count; - } /* end else */ - } /* end if */ - - m = 0; - do { - if (4 >= sel_offset) { - if (m == 0) { - start[4] = 0; - count[4] = base_count; - } /* end if */ - else { - start[4] = checker_edge_size; - count[4] = offset_count; - } /* end else */ - } /* end if */ - - if (((i + j + k + l + m) % 2) == 0) { - if (first_selection) { - first_selection = false; - - ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_SET, - &(start[n_cube_offset]), &(stride[n_cube_offset]), - &(count[n_cube_offset]), &(block[n_cube_offset])); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end if */ - else { - ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_OR, - &(start[n_cube_offset]), &(stride[n_cube_offset]), - &(count[n_cube_offset]), &(block[n_cube_offset])); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end else */ - } /* end if */ - - m++; - } while ((m <= 1) && (4 >= sel_offset)); - l++; - } while ((l <= 1) && (3 >= sel_offset)); - k++; - } while ((k <= 1) && (2 >= sel_offset)); - j++; - } while ((j <= 1) && (1 >= sel_offset)); - i++; - } while ((i <= 1) && (0 >= sel_offset)); - - /* Weirdness alert: - * - * Some how, it seems that selections can extend beyond the - * boundaries of the target dataspace -- hence the following - * code to manually clip the selection back to the dataspace - * proper. - */ - for (u = 0; u < SS_DR_MAX_RANK; u++) { - start[u] = 0; - stride[u] = edge_size; - count[u] = 1; - block[u] = edge_size; - } /* end for */ - - ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_AND, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -} /* test_select_hyper_checker_board_dr__select_checker_board() */ -#endif - -/**************************************************************** -** -** test_select_hyper_checker_board_dr__verify_data(): -** -** Examine the supplied buffer to see if it contains the -** expected data. Return true if it does, and false -** otherwise. -** -** The supplied buffer is presumed to contain the results -** of read or writing a checkerboard selection of an -** n-cube, or a checkerboard selection of an m (1 <= m < n) -** dimensional slice through an n-cube parallel to the -** fastest changing indices. 
-** -** It is further presumed that the buffer was zeroed before -** the read, and that the n-cube was initialize with the -** natural numbers listed in order from the origin along -** the fastest changing axis. -** -** Thus for a 10x10x10 3-cube, the value stored in location -** (x, y, z) (assuming that z is the fastest changing index -** and x the slowest) is assumed to be: -** -** (10 * 10 * x) + (10 * y) + z -** -** Thus, if the buffer contains the result of reading a -** checker board selection of a 10x10x10 3-cube, location -** (x, y, z) will contain zero if it is not in a checker, -** and 100x + 10y + z if (x, y, z) is in a checker. -** -** If the buffer contains the result of reading a 3 -** dimensional slice (parallel to the three fastest changing -** indices) through an n cube (n > 3), then the expected -** values in the buffer will be the same, save that we will -** add a constant determined by the origin of the 3-cube -** in the n-cube. -** -** Finally, the function presumes that the first element -** of the buffer resides either at the origin of either -** a selected or an unselected checker. -** -****************************************************************/ -#if 0 -H5_ATTR_PURE static bool -test_select_hyper_checker_board_dr__verify_data(uint16_t *buf_ptr, unsigned rank, unsigned edge_size, - unsigned checker_edge_size, uint16_t first_expected_val, - bool buf_starts_in_checker) -{ - bool good_data = true; - bool in_checker; - bool start_in_checker[5]; - uint16_t expected_value; - uint16_t *val_ptr; - unsigned i, j, k, l, m; /* to track position in n-cube */ - unsigned v, w, x, y, z; /* to track position in checker */ - const unsigned test_max_rank = 5; /* code changes needed if this is increased */ - - assert(buf_ptr != NULL); - assert(0 < rank); - assert(rank <= test_max_rank); - assert(edge_size >= 6); - assert(0 < checker_edge_size); - assert(checker_edge_size <= edge_size); - assert(test_max_rank <= SS_DR_MAX_RANK); - - val_ptr = buf_ptr; - expected_value = first_expected_val; - - i = 0; - v = 0; - start_in_checker[0] = buf_starts_in_checker; - do { - if (v >= checker_edge_size) { - start_in_checker[0] = !start_in_checker[0]; - v = 0; - } /* end if */ - - j = 0; - w = 0; - start_in_checker[1] = start_in_checker[0]; - do { - if (w >= checker_edge_size) { - start_in_checker[1] = !start_in_checker[1]; - w = 0; - } /* end if */ - - k = 0; - x = 0; - start_in_checker[2] = start_in_checker[1]; - do { - if (x >= checker_edge_size) { - start_in_checker[2] = !start_in_checker[2]; - x = 0; - } /* end if */ - - l = 0; - y = 0; - start_in_checker[3] = start_in_checker[2]; - do { - if (y >= checker_edge_size) { - start_in_checker[3] = !start_in_checker[3]; - y = 0; - } /* end if */ - - m = 0; - z = 0; - in_checker = start_in_checker[3]; - do { - if (z >= checker_edge_size) { - in_checker = !in_checker; - z = 0; - } /* end if */ - - if (in_checker) { - if (*val_ptr != expected_value) - good_data = false; - } /* end if */ - else { - if (*val_ptr != 0) - good_data = false; - } /* end else */ - - val_ptr++; - expected_value++; - - m++; - z++; - } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); - l++; - y++; - } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); - k++; - x++; - } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); - j++; - w++; - } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); - i++; - v++; - } while ((rank >= test_max_rank) && (i < edge_size)); - - return (good_data); -} /* test_select_hyper_checker_board_dr__verify_data() */ -#endif 
- -/**************************************************************** -** -** test_select_hyper_checker_board_dr__run_test(): Test H5S -** (dataspace) selection code with checker board source and -** target selections having different ranks but the same -** shape. We have already tested H5Sselect_shape_same in -** isolation, so now we try to do I/O. -** -****************************************************************/ -#if 0 -static void -test_select_hyper_checker_board_dr__run_test(int test_num, const uint16_t *cube_buf, const uint16_t *zero_buf, - unsigned edge_size, unsigned checker_edge_size, - unsigned chunk_edge_size, unsigned small_rank, - unsigned large_rank, hid_t dset_type, hid_t xfer_plist) -{ - bool data_ok; - hid_t fapl; /* File access property list */ - hid_t fid; /* HDF5 File IDs */ - hid_t full_small_cube_sid; /* Dataspace for small cube w/all selection */ - hid_t mem_small_cube_sid; - hid_t file_small_cube_sid; - hid_t full_large_cube_sid; /* Dataspace for large cube w/all selection */ - hid_t mem_large_cube_sid; - hid_t file_large_cube_sid; - hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */ - hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */ - hid_t small_cube_dataset; /* Dataset ID */ - hid_t large_cube_dataset; /* Dataset ID */ - unsigned small_rank_offset; /* Rank offset of slice */ - const unsigned test_max_rank = 5; /* must update code if this changes */ - size_t start_index; /* Offset within buffer to begin inspecting */ - size_t stop_index; /* Offset within buffer to end inspecting */ - uint16_t expected_value; - uint16_t *small_cube_buf_1; - uint16_t *large_cube_buf_1; - uint16_t *ptr_1; - size_t small_cube_size; /* Number of elements in small cube */ - size_t large_cube_size; /* Number of elements in large cube */ - hsize_t dims[SS_DR_MAX_RANK]; - hsize_t chunk_dims[SS_DR_MAX_RANK]; - hsize_t sel_start[SS_DR_MAX_RANK]; - unsigned u, v, w, x; /* Local index variables */ - size_t s; /* Local index variable */ - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num)); - MESSAGE(7, ("\tranks = %d/%d, edge_size = %d, checker_edge_size = %d, chunk_edge_size = %d.\n", - small_rank, large_rank, edge_size, checker_edge_size, chunk_edge_size)); - - assert(edge_size >= 6); - assert(checker_edge_size > 0); - assert(checker_edge_size <= edge_size); - assert(edge_size >= chunk_edge_size); - assert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); - assert(small_rank > 0); - assert(small_rank < large_rank); - assert(large_rank <= test_max_rank); - assert(test_max_rank <= SS_DR_MAX_RANK); - - /* Compute cube sizes */ - small_cube_size = large_cube_size = (size_t)1; - for (u = 0; u < large_rank; u++) { - if (u < small_rank) - small_cube_size *= (size_t)edge_size; - - large_cube_size *= (size_t)edge_size; - } /* end for */ - assert(large_cube_size < (size_t)(UINT_MAX)); - - small_rank_offset = test_max_rank - small_rank; - assert(small_rank_offset >= 1); - - /* also, at present, we use 16 bit values in this test -- - * hence the following assertion. Delete it if we convert - * to 32 bit values. 
- */ - assert(large_cube_size < (size_t)(64 * 1024)); - - /* Allocate & initialize buffers */ - small_cube_buf_1 = (uint16_t *)calloc(sizeof(uint16_t), small_cube_size); - CHECK_PTR(small_cube_buf_1, "calloc"); - large_cube_buf_1 = (uint16_t *)calloc(sizeof(uint16_t), large_cube_size); - CHECK_PTR(large_cube_buf_1, "calloc"); - - /* Create a dataset transfer property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Use the 'core' VFD for this test */ - ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), false); - CHECK(ret, FAIL, "H5Pset_fapl_core"); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Close file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* setup dims: */ - dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = edge_size; - - /* Create small cube dataspaces */ - full_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); - CHECK(full_small_cube_sid, FAIL, "H5Screate_simple"); - - mem_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); - CHECK(mem_small_cube_sid, FAIL, "H5Screate_simple"); - - file_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL); - CHECK(file_small_cube_sid, FAIL, "H5Screate_simple"); - - /* Create large cube dataspace */ - full_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); - CHECK(full_large_cube_sid, FAIL, "H5Screate_simple"); - - mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); - CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple"); - - file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL); - CHECK(file_large_cube_sid, FAIL, "H5Screate_simple"); - - /* if chunk edge size is greater than zero, set up the small and - * large data set creation property lists to specify chunked - * datasets. 
- */ - if (chunk_edge_size > 0) { - chunk_dims[0] = chunk_dims[1] = chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = chunk_edge_size; - - small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate"); - - ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate"); - - ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - } /* end if */ - - /* create the small cube dataset */ - small_cube_dataset = H5Dcreate2(fid, "small_cube_dataset", dset_type, file_small_cube_sid, H5P_DEFAULT, - small_cube_dcpl_id, H5P_DEFAULT); - CHECK(small_cube_dataset, FAIL, "H5Dcreate2"); - - /* Close non-default small dataset DCPL */ - if (small_cube_dcpl_id != H5P_DEFAULT) { - ret = H5Pclose(small_cube_dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - } /* end if */ - - /* create the large cube dataset */ - large_cube_dataset = H5Dcreate2(fid, "large_cube_dataset", dset_type, file_large_cube_sid, H5P_DEFAULT, - large_cube_dcpl_id, H5P_DEFAULT); - CHECK(large_cube_dataset, FAIL, "H5Dcreate2"); - - /* Close non-default large dataset DCPL */ - if (large_cube_dcpl_id != H5P_DEFAULT) { - ret = H5Pclose(large_cube_dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - } /* end if */ - - /* write initial data to the on disk datasets */ - ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, full_small_cube_sid, - xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, full_large_cube_sid, - xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* read initial small cube data from disk and verify that it is as expected. */ - ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, full_small_cube_sid, xfer_plist, - small_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the data is valid */ - verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size, edge_size, small_rank); - - /* read initial large cube data from disk and verify that it is as expected. */ - ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, full_large_cube_sid, xfer_plist, - large_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* Check that the data is valid */ - verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size, edge_size, large_rank); - - /* first, verify that we can read from disk correctly using selections - * of different rank that H5Sselect_shape_same() views as being of the - * same shape. - * - * Start by reading small_rank-D slice from the on disk large cube, and - * verifying that the data read is correct. Verify that H5Sselect_shape_same() - * returns true on the memory and file selections. - * - * The first step is to set up the needed checker board selection in the - * in memory small small cube - */ - - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - - test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid, small_rank, edge_size, - checker_edge_size, small_rank, sel_start); - - /* now read slices from the large, on-disk cube into the small cube. 
- * Note how we adjust sel_start only in the dimensions peculiar to the - * large cube. - */ - - u = 0; - do { - if (small_rank_offset > 0) - sel_start[0] = u; - - v = 0; - do { - if (small_rank_offset > 1) - sel_start[1] = v; - - w = 0; - do { - if (small_rank_offset > 2) - sel_start[2] = w; - - x = 0; - do { - if (small_rank_offset > 3) - sel_start[3] = x; - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - assert((sel_start[0] == 0) || (0 < small_rank_offset)); - assert((sel_start[1] == 0) || (1 < small_rank_offset)); - assert((sel_start[2] == 0) || (2 < small_rank_offset)); - assert((sel_start[3] == 0) || (3 < small_rank_offset)); - assert((sel_start[4] == 0) || (4 < small_rank_offset)); - - test_select_hyper_checker_board_dr__select_checker_board( - file_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(mem_small_cube_sid, file_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* zero the buffer that we will be using for reading */ - memset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size); - - /* Read selection from disk */ - ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_small_cube_sid, - file_large_cube_sid, xfer_plist, small_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + - (w * edge_size * edge_size) + (x * edge_size)); - - data_ok = test_select_hyper_checker_board_dr__verify_data(small_cube_buf_1, small_rank, - edge_size, checker_edge_size, - expected_value, (bool)true); - if (!data_ok) - TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && - (x < edge_size)); - w++; - } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && - (w < edge_size)); - v++; - } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && - (v < edge_size)); - u++; - } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); - - /* similarly, read the on disk small cube into slices through the in memory - * large cube, and verify that the correct data (and only the correct data) - * is read. - */ - - /* select a checker board in the file small cube dataspace */ - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid, small_rank, edge_size, - checker_edge_size, small_rank, sel_start); - - u = 0; - do { - if (0 < small_rank_offset) - sel_start[0] = u; - - v = 0; - do { - if (1 < small_rank_offset) - sel_start[1] = v; - - w = 0; - do { - if (2 < small_rank_offset) - sel_start[2] = w; - - x = 0; - do { - if (3 < small_rank_offset) - sel_start[3] = x; - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. 
- */ - - assert((sel_start[0] == 0) || (0 < small_rank_offset)); - assert((sel_start[1] == 0) || (1 < small_rank_offset)); - assert((sel_start[2] == 0) || (2 < small_rank_offset)); - assert((sel_start[3] == 0) || (3 < small_rank_offset)); - assert((sel_start[4] == 0) || (4 < small_rank_offset)); - - test_select_hyper_checker_board_dr__select_checker_board( - mem_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(file_small_cube_sid, mem_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* zero out the in memory large cube */ - memset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size); - - /* Read selection from disk */ - ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, - file_small_cube_sid, xfer_plist, large_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* verify that the expected data and only the - * expected data was read. - */ - data_ok = true; - ptr_1 = large_cube_buf_1; - expected_value = 0; - start_index = (u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + - (x * edge_size); - stop_index = start_index + small_cube_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= large_cube_size); - - /* verify that the large cube contains only zeros before the slice */ - for (s = 0; s < start_index; s++) { - if (*ptr_1 != 0) - data_ok = false; - ptr_1++; - } /* end for */ - assert(s == start_index); - - data_ok &= test_select_hyper_checker_board_dr__verify_data( - ptr_1, small_rank, edge_size, checker_edge_size, (uint16_t)0, (bool)true); - - ptr_1 += small_cube_size; - s += small_cube_size; - - assert(s == stop_index + 1); - - /* verify that the large cube contains only zeros after the slice */ - for (s = stop_index + 1; s < large_cube_size; s++) { - if (*ptr_1 != 0) - data_ok = false; - ptr_1++; - } /* end for */ - if (!data_ok) - TestErrPrintf("large cube read from small cube has bad data! Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && - (x < edge_size)); - w++; - } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && - (w < edge_size)); - v++; - } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && - (v < edge_size)); - u++; - } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); - - /* now we go in the opposite direction, verifying that we can write - * from memory to file using selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Start by writing small_rank D slices from the in memory large cube, to - * the the on disk small cube dataset. After each write, read the small - * cube dataset back from disk, and verify that it contains the expected - * data. Verify that H5Sselect_shape_same() returns true on the - * memory and file selections. 
- */ - - /* select a checker board in the file small cube dataspace */ - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid, small_rank, edge_size, - checker_edge_size, small_rank, sel_start); - - u = 0; - do { - if (small_rank_offset > 0) - sel_start[0] = u; - - v = 0; - do { - if (small_rank_offset > 1) - sel_start[1] = v; - - w = 0; - do { - if (small_rank_offset > 2) - sel_start[2] = w; - - x = 0; - do { - if (small_rank_offset > 3) - sel_start[3] = x; - - /* zero out the on disk small cube */ - ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, - full_small_cube_sid, xfer_plist, zero_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - assert((sel_start[0] == 0) || (0 < small_rank_offset)); - assert((sel_start[1] == 0) || (1 < small_rank_offset)); - assert((sel_start[2] == 0) || (2 < small_rank_offset)); - assert((sel_start[3] == 0) || (3 < small_rank_offset)); - assert((sel_start[4] == 0) || (4 < small_rank_offset)); - - test_select_hyper_checker_board_dr__select_checker_board( - mem_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(file_small_cube_sid, mem_large_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* write the slice from the in memory large cube to the - * on disk small cube - */ - ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, - file_small_cube_sid, xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* zero the buffer that we will be using for reading */ - memset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size); - - /* read the on disk small cube into memory */ - ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, - full_small_cube_sid, xfer_plist, small_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + - (w * edge_size * edge_size) + (x * edge_size)); - - data_ok = test_select_hyper_checker_board_dr__verify_data(small_cube_buf_1, small_rank, - edge_size, checker_edge_size, - expected_value, (bool)true); - if (!data_ok) - TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && - (x < edge_size)); - w++; - } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && - (w < edge_size)); - v++; - } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && - (v < edge_size)); - u++; - } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); - - /* Now write checker board selections of the entries in memory - * small cube to slices of the on disk cube. After each write, - * read the on disk large cube * into memory, and verify that - * it contains the expected * data. Verify that - * H5Sselect_shape_same() returns true on the memory and file - * selections. 
- */ - - /* select a checker board in the in memory small cube dataspace */ - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid, small_rank, edge_size, - checker_edge_size, small_rank, sel_start); - - u = 0; - do { - if (small_rank_offset > 0) - sel_start[0] = u; - - v = 0; - do { - if (small_rank_offset > 1) - sel_start[1] = v; - - w = 0; - do { - if (small_rank_offset > 2) - sel_start[2] = w; - - x = 0; - do { - if (small_rank_offset > 3) - sel_start[3] = x; - - /* zero out the on disk cube */ - ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_USHORT, full_large_cube_sid, - full_large_cube_sid, xfer_plist, zero_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - assert((sel_start[0] == 0) || (0 < small_rank_offset)); - assert((sel_start[1] == 0) || (1 < small_rank_offset)); - assert((sel_start[2] == 0) || (2 < small_rank_offset)); - assert((sel_start[3] == 0) || (3 < small_rank_offset)); - assert((sel_start[4] == 0) || (4 < small_rank_offset)); - - test_select_hyper_checker_board_dr__select_checker_board( - file_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(file_large_cube_sid, mem_small_cube_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* write the checker board selection of the in memory - * small cube to a slice through the on disk large - * cube. - */ - ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_small_cube_sid, - file_large_cube_sid, xfer_plist, cube_buf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* zero out the in memory large cube */ - memset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size); - - /* read the on disk large cube into memory */ - ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, - full_large_cube_sid, xfer_plist, large_cube_buf_1); - CHECK(ret, FAIL, "H5Dread"); - - /* verify that the expected data and only the - * expected data was written to the on disk large - * cube. - */ - data_ok = true; - ptr_1 = large_cube_buf_1; - expected_value = 0; - start_index = (u * edge_size * edge_size * edge_size * edge_size) + - (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) + - (x * edge_size); - stop_index = start_index + small_cube_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= large_cube_size); - - /* verify that the large cube contains only zeros before the slice */ - for (s = 0; s < start_index; s++) { - if (*ptr_1 != 0) - data_ok = false; - ptr_1++; - } /* end for */ - assert(s == start_index); - - /* verify that the slice contains the expected data */ - data_ok &= test_select_hyper_checker_board_dr__verify_data( - ptr_1, small_rank, edge_size, checker_edge_size, (uint16_t)0, (bool)true); - - ptr_1 += small_cube_size; - s += small_cube_size; - - assert(s == stop_index + 1); - - /* verify that the large cube contains only zeros after the slice */ - for (s = stop_index + 1; s < large_cube_size; s++) { - if (*ptr_1 != 0) - data_ok = false; - ptr_1++; - } /* end for */ - if (!data_ok) - TestErrPrintf("large cube written from small cube has bad data! 
Line=%d\n", __LINE__); - - x++; - } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) && - (x < edge_size)); - w++; - } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) && - (w < edge_size)); - v++; - } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) && - (v < edge_size)); - u++; - } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size)); - - /* Close memory dataspaces */ - ret = H5Sclose(full_small_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(full_large_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(mem_small_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(mem_large_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(file_small_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(file_large_cube_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Datasets */ - ret = H5Dclose(small_cube_dataset); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Dclose(large_cube_dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(small_cube_buf_1); - free(large_cube_buf_1); - -} /* test_select_hyper_checker_board_dr__run_test() */ -#endif -/**************************************************************** -** -** test_select_hyper_checker_board_dr(): Test H5S (dataspace) -** selection code with checkerboard source and target having -** different ranks but the same shape. We have already -** tested H5Sselect_shape_same in isolation, so now we try to do -** I/O. -** -** This is just an initial smoke check, so we will work -** with a slice through a cube only. -** -****************************************************************/ -#if 0 -static void -test_select_hyper_checker_board_dr(hid_t dset_type, hid_t xfer_plist) -{ - uint16_t *cube_buf; /* Buffer for writing cube data */ - uint16_t *cube_ptr; /* Temporary pointer into cube data */ - uint16_t *zero_buf; /* Buffer for writing zeroed cube data */ - int test_num = 0; - unsigned checker_edge_size = 2; /* Size of checkerboard dimension */ - unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */ - unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */ - unsigned small_rank; /* Current rank of small dataset */ - unsigned large_rank; /* Current rank of large dataset */ - unsigned max_rank = 5; /* Max. rank to use */ - size_t max_cube_size; /* Max. number of elements in largest cube */ - size_t s; /* Local index variable */ - unsigned u; /* Local index variable */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Checker Board Hyperslabs With Different Rank I/O Functionality\n")); - - /* Compute max. 
cube size */ - max_cube_size = (size_t)1; - for (u = 0; u < max_rank; u++) - max_cube_size *= (size_t)(edge_size + 1); - - /* Allocate cube buffer for writing values */ - cube_buf = (uint16_t *)malloc(sizeof(uint16_t) * max_cube_size); - CHECK_PTR(cube_buf, "malloc"); - - /* Initialize the cube buffer */ - cube_ptr = cube_buf; - for (s = 0; s < max_cube_size; s++) - *cube_ptr++ = (uint16_t)s; - - /* Allocate cube buffer for zeroing values on disk */ - zero_buf = (uint16_t *)calloc(sizeof(uint16_t), max_cube_size); - CHECK_PTR(zero_buf, "calloc"); - - for (large_rank = 1; large_rank <= max_rank; large_rank++) { - for (small_rank = 1; small_rank < large_rank; small_rank++) { - chunk_edge_size = 0; - test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size, - checker_edge_size, chunk_edge_size, small_rank, - large_rank, dset_type, xfer_plist); - test_num++; - - test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size + 1, - checker_edge_size, chunk_edge_size, small_rank, - large_rank, dset_type, xfer_plist); - test_num++; - - chunk_edge_size = 3; - test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size, - checker_edge_size, chunk_edge_size, small_rank, - large_rank, dset_type, xfer_plist); - test_num++; - - test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size + 1, - checker_edge_size, chunk_edge_size, small_rank, - large_rank, dset_type, xfer_plist); - test_num++; - } /* for loop on small rank */ - } /* for loop on large rank */ - - free(cube_buf); - free(zero_buf); - -} /* test_select_hyper_checker_board_dr() */ -#endif -/**************************************************************** -** -** test_select_hyper_copy(): Test H5S (dataspace) selection code. 
-** Tests copying hyperslab selections -** -****************************************************************/ -static void -test_select_hyper_copy(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t data1, data2; /* Dataset IDs */ - hid_t sid1, sid2, sid3; /* Dataspace IDs */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - uint16_t *wbuf, /* buffer to write to disk */ - *rbuf, /* 1st buffer read from disk */ - *rbuf2, /* 2nd buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n")); - - /* Allocate write & read buffers */ - wbuf = (uint16_t *)malloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - rbuf2 = (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf2, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x3x3 count with a stride of 2x4x3 & 1x2x2 block hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - stride[0] = 2; - stride[1] = 4; - stride[2] = 3; - count[0] = 2; - count[1] = 3; - count[2] = 3; - block[0] = 1; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 4x2 count with a stride of 5x5 & 3x3 block hyperslab for memory dataset */ - start[0] = 1; - start[1] = 1; - stride[0] = 5; - stride[1] = 5; - count[0] = 4; - count[1] = 2; - block[0] = 3; - block[1] = 3; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Make a copy of the dataspace to write */ - sid3 = H5Scopy(sid2); - CHECK(sid3, FAIL, "H5Scopy"); - - /* Create a dataset */ - data1 = H5Dcreate2(fid1, SPACE1_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(data1, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create another dataset */ - data2 = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(data2, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory 
dataspace */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 3x4 count with a stride of 4x4 & 2x3 block hyperslab for memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 4; - stride[1] = 4; - count[0] = 3; - count[1] = 4; - block[0] = 2; - block[1] = 3; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Make a copy of the dataspace to read */ - sid3 = H5Scopy(sid2); - CHECK(sid3, FAIL, "H5Scopy"); - - /* Read selection from disk */ - ret = H5Dread(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Read selection from disk */ - ret = H5Dread(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, rbuf2); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - if (memcmp(rbuf, rbuf2, sizeof(uint16_t) * SPACE3_DIM1 * SPACE3_DIM2) != 0) - TestErrPrintf("hyperslab values don't match! Line=%d\n", __LINE__); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close 2nd memory dataspace */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(data1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close Dataset */ - ret = H5Dclose(data2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(rbuf2); -} /* test_select_hyper_copy() */ - -/**************************************************************** -** -** test_select_point_copy(): Test H5S (dataspace) selection code. 
-** Tests copying point selections -** -****************************************************************/ -static void -test_select_point_copy(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t data1, data2; /* Dataset IDs */ - hid_t sid1, sid2, sid3; /* Dataspace IDs */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ - hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ - hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ - uint16_t *wbuf, /* buffer to write to disk */ - *rbuf, /* 1st buffer read from disk */ - *rbuf2, /* 2nd buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n")); - - /* Allocate write & read buffers */ - wbuf = (uint16_t *)malloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - rbuf2 = (uint16_t *)calloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf2, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for disk dataset */ - coord1[0][0] = 0; - coord1[0][1] = 10; - coord1[0][2] = 5; - coord1[1][0] = 1; - coord1[1][1] = 2; - coord1[1][2] = 7; - coord1[2][0] = 2; - coord1[2][1] = 4; - coord1[2][2] = 9; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[3][2] = 11; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[4][2] = 13; - coord1[5][0] = 2; - coord1[5][1] = 12; - coord1[5][2] = 0; - coord1[6][0] = 0; - coord1[6][1] = 14; - coord1[6][2] = 2; - coord1[7][0] = 1; - coord1[7][1] = 0; - coord1[7][2] = 4; - coord1[8][0] = 2; - coord1[8][1] = 1; - coord1[8][2] = 6; - coord1[9][0] = 0; - coord1[9][1] = 3; - coord1[9][2] = 8; - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Select sequence of ten points for write dataset */ - coord2[0][0] = 12; - coord2[0][1] = 3; - coord2[1][0] = 15; - coord2[1][1] = 13; - coord2[2][0] = 7; - coord2[2][1] = 25; - coord2[3][0] = 0; - coord2[3][1] = 6; - coord2[4][0] = 13; - coord2[4][1] = 0; - coord2[5][0] = 24; - coord2[5][1] = 11; - coord2[6][0] = 12; - coord2[6][1] = 21; - coord2[7][0] = 29; - coord2[7][1] = 4; - coord2[8][0] = 8; - coord2[8][1] = 8; - coord2[9][0] = 19; - coord2[9][1] = 17; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Make a copy of the dataspace to write */ - sid3 = H5Scopy(sid2); - CHECK(sid3, FAIL, "H5Scopy"); - - /* Create a dataset */ - data1 = H5Dcreate2(fid1, 
SPACE1_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(data1, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create another dataset */ - data2 = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(data2, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of points for read dataset */ - coord3[0][0] = 0; - coord3[0][1] = 2; - coord3[1][0] = 4; - coord3[1][1] = 8; - coord3[2][0] = 13; - coord3[2][1] = 13; - coord3[3][0] = 14; - coord3[3][1] = 25; - coord3[4][0] = 7; - coord3[4][1] = 9; - coord3[5][0] = 2; - coord3[5][1] = 0; - coord3[6][0] = 9; - coord3[6][1] = 19; - coord3[7][0] = 1; - coord3[7][1] = 22; - coord3[8][0] = 12; - coord3[8][1] = 21; - coord3[9][0] = 11; - coord3[9][1] = 6; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Make a copy of the dataspace to read */ - sid3 = H5Scopy(sid2); - CHECK(sid3, FAIL, "H5Scopy"); - - /* Read selection from disk */ - ret = H5Dread(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Read selection from disk */ - ret = H5Dread(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, rbuf2); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - if (memcmp(rbuf, rbuf2, sizeof(uint16_t) * SPACE3_DIM1 * SPACE3_DIM2) != 0) - TestErrPrintf("point values don't match!\n"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close 2nd memory dataspace */ - ret = H5Sclose(sid3); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(data1); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close Dataset */ - ret = H5Dclose(data2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); - free(rbuf2); -} /* test_select_point_copy() */ - -/**************************************************************** -** -** test_select_hyper_offset(): Test basic H5S (dataspace) selection code. -** Tests hyperslabs of various sizes and dimensionalities with selection -** offsets. 
-** -****************************************************************/ -static void -test_select_hyper_offset(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - hssize_t offset[SPACE1_RANK]; /* Offset of selection */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - htri_t valid; /* Generic boolean return value */ - H5S_class_t ext_type; /* Extent type */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with Offsets\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Verify extent type */ - ext_type = H5Sget_simple_extent_type(sid1); - VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check a valid offset */ - offset[0] = -1; - offset[1] = 0; - offset[2] = 0; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid1); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Check an invalid offset */ - offset[0] = 10; - offset[1] = 0; - offset[2] = 0; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid1); - VERIFY(valid, false, "H5Sselect_valid"); - - /* Reset offset */ - offset[0] = 0; - offset[1] = 0; - offset[2] = 0; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid1); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Select 15x26 hyperslab for memory dataset */ - start[0] = 15; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Choose a valid offset for the memory dataspace */ - offset[0] = 
-10; - offset[1] = 0; - ret = H5Soffset_simple(sid2, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid2); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < SPACE3_DIM1; i++) { - tbuf = wbuf + ((i + 5) * SPACE2_DIM2); - tbuf2 = rbuf + (i * SPACE3_DIM2); - for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%u, *tbuf2=%u\n", - __LINE__, i, j, (unsigned)*tbuf, (unsigned)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_offset() */ - -/**************************************************************** -** -** test_select_hyper_offset2(): Test basic H5S (dataspace) selection code. -** Tests optimized hyperslab I/O with selection offsets. 
-** -****************************************************************/ -static void -test_select_hyper_offset2(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; - hsize_t start[SPACE7_RANK]; /* Starting location of hyperslab */ - hsize_t count[SPACE7_RANK]; /* Element count of hyperslab */ - hssize_t offset[SPACE7_RANK]; /* Offset of selection */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - htri_t valid; /* Generic boolean return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing More Hyperslab Selection Functions with Offsets\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE7_DIM1 * SPACE7_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE7_DIM1; i++) - for (j = 0; j < SPACE7_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE7_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 4x10 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - count[0] = 4; - count[1] = 10; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Set offset */ - offset[0] = 1; - offset[1] = 0; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid1); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Select 4x10 hyperslab for memory dataset */ - start[0] = 1; - start[1] = 0; - count[0] = 4; - count[1] = 10; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Choose a valid offset for the memory dataspace */ - offset[0] = 2; - offset[1] = 0; - ret = H5Soffset_simple(sid2, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid2); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE7_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < 4; i++) { - tbuf = wbuf + ((i + 3) * SPACE7_DIM2); - tbuf2 = rbuf + ((i + 3) * SPACE7_DIM2); - for (j = 0; j < SPACE7_DIM2; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%u, *tbuf2=%u\n", - __LINE__, i, j, (unsigned)*tbuf, (unsigned)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close 
memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_offset2() */ - -/**************************************************************** -** -** test_select_point_offset(): Test basic H5S (dataspace) selection code. -** Tests element selections between dataspaces of various sizes -** and dimensionalities with selection offsets. -** -****************************************************************/ -static void -test_select_point_offset(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */ - hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */ - hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */ - hssize_t offset[SPACE1_RANK]; /* Offset of selection */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - htri_t valid; /* Generic boolean return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Element Selection Functions\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for write buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for disk dataset */ - coord1[0][0] = 0; - coord1[0][1] = 10; - coord1[0][2] = 5; - coord1[1][0] = 1; - coord1[1][1] = 2; - coord1[1][2] = 7; - coord1[2][0] = 2; - coord1[2][1] = 4; - coord1[2][2] = 9; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[3][2] = 11; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[4][2] = 12; - coord1[5][0] = 2; - coord1[5][1] = 12; - coord1[5][2] = 0; - coord1[6][0] = 0; - coord1[6][1] = 14; - coord1[6][2] = 2; - coord1[7][0] = 1; - coord1[7][1] = 0; - coord1[7][2] = 4; - coord1[8][0] = 2; - coord1[8][1] = 1; - coord1[8][2] = 6; - coord1[9][0] = 0; - coord1[9][1] = 3; - coord1[9][2] = 8; - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Check a valid offset */ - offset[0] = 0; - offset[1] = 0; - offset[2] = 1; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - 
valid = H5Sselect_valid(sid1); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Check an invalid offset */ - offset[0] = 10; - offset[1] = 0; - offset[2] = 0; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid1); - VERIFY(valid, false, "H5Sselect_valid"); - - /* Reset offset */ - offset[0] = 0; - offset[1] = 0; - offset[2] = 0; - ret = H5Soffset_simple(sid1, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid1); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Select sequence of ten points for write dataset */ - coord2[0][0] = 12; - coord2[0][1] = 3; - coord2[1][0] = 15; - coord2[1][1] = 13; - coord2[2][0] = 7; - coord2[2][1] = 24; - coord2[3][0] = 0; - coord2[3][1] = 6; - coord2[4][0] = 13; - coord2[4][1] = 0; - coord2[5][0] = 24; - coord2[5][1] = 11; - coord2[6][0] = 12; - coord2[6][1] = 21; - coord2[7][0] = 23; - coord2[7][1] = 4; - coord2[8][0] = 8; - coord2[8][1] = 8; - coord2[9][0] = 19; - coord2[9][1] = 17; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Choose a valid offset for the memory dataspace */ - offset[0] = 5; - offset[1] = 1; - ret = H5Soffset_simple(sid2, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - valid = H5Sselect_valid(sid2); - VERIFY(valid, true, "H5Sselect_valid"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select sequence of points for read dataset */ - coord3[0][0] = 0; - coord3[0][1] = 2; - coord3[1][0] = 4; - coord3[1][1] = 8; - coord3[2][0] = 13; - coord3[2][1] = 13; - coord3[3][0] = 14; - coord3[3][1] = 25; - coord3[4][0] = 7; - coord3[4][1] = 9; - coord3[5][0] = 2; - coord3[5][1] = 0; - coord3[6][0] = 9; - coord3[6][1] = 19; - coord3[7][0] = 1; - coord3[7][1] = 22; - coord3[8][0] = 12; - coord3[8][1] = 21; - coord3[9][0] = 11; - coord3[9][1] = 6; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < POINT1_NPOINTS; i++) { - tbuf = wbuf + ((coord2[i][0] + (hsize_t)offset[0]) * SPACE2_DIM2) + coord2[i][1] + (hsize_t)offset[1]; - tbuf2 = rbuf + (coord3[i][0] * SPACE3_DIM2) + coord3[i][1]; - if (*tbuf != *tbuf2) - TestErrPrintf("element values don't match!, i=%d\n", i); - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_point_offset() */ - -/**************************************************************** -** -** 
test_select_hyper_union(): Test basic H5S (dataspace) selection code. -** Tests unions of hyperslabs of various sizes and dimensionalities. -** -****************************************************************/ -static void -test_select_hyper_union(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t xfer; /* Dataset Transfer Property List ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - size_t begin[SPACE2_DIM1] = /* Offset within irregular block */ - {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* First ten rows start at offset 0 */ - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; /* Next eighteen rows start at offset 5 */ - size_t len[SPACE2_DIM1] = /* Len of each row within irregular block */ - {10, 10, 10, 10, 10, 10, 10, 10, /* First eight rows are 10 long */ - 20, 20, /* Next two rows are 20 long */ - 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}; /* Next eighteen rows are 15 long */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - hssize_t npoints; /* Number of elements in selection */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Test simple case of one block overlapping another */ - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid1); - VERIFY(npoints, 2 * 15 * 13, "H5Sget_select_npoints"); - - /* Select 8x26 hyperslab for memory dataset */ - start[0] = 15; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 8; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union overlapping 8x26 hyperslab for memory dataset (to form a 15x26 selection) */ - start[0] = 22; - start[1] = 0; 
- stride[0] = 1; - stride[1] = 1; - count[0] = 8; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < SPACE3_DIM1; i++) { - tbuf = wbuf + ((i + 15) * SPACE2_DIM2); - tbuf2 = rbuf + (i * SPACE3_DIM2); - for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Test simple case of several block overlapping another */ - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 8x15 hyperslab for memory dataset */ - start[0] = 15; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 8; - count[1] = 15; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union overlapping 8x15 hyperslab for memory dataset (to form a 15x15 selection) */ - start[0] = 22; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 8; - count[1] = 15; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union overlapping 15x15 hyperslab for memory dataset (to form a 15x26 selection) */ - start[0] = 15; - start[1] = 11; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 15; - block[0] = 1; - 
block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < SPACE3_DIM1; i++) { - tbuf = wbuf + ((i + 15) * SPACE2_DIM2); - tbuf2 = rbuf + (i * SPACE3_DIM2); - for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Test disjoint case of two non-overlapping blocks */ - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 7x26 hyperslab for memory dataset */ - start[0] = 1; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 7; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union non-overlapping 8x26 hyperslab for memory dataset (to form a 15x26 disjoint selection) */ - start[0] = 22; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 8; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection 
to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0; i < SPACE3_DIM1; i++) { - /* Jump over gap in middle */ - if (i < 7) - tbuf = wbuf + ((i + 1) * SPACE2_DIM2); - else - tbuf = wbuf + ((i + 15) * SPACE2_DIM2); - tbuf2 = rbuf + (i * SPACE3_DIM2); - for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Test disjoint case of two non-overlapping blocks with hyperslab caching turned off */ - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 7x26 hyperslab for memory dataset */ - start[0] = 1; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 7; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union non-overlapping 8x26 hyperslab for memory dataset (to form a 15x26 disjoint selection) */ - start[0] = 22; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 8; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - xfer = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer, FAIL, "H5Pcreate"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace 
for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Close transfer property list */ - ret = H5Pclose(xfer); - CHECK(ret, FAIL, "H5Pclose"); - - /* Compare data read with data written out */ - for (i = 0; i < SPACE3_DIM1; i++) { - /* Jump over gap in middle */ - if (i < 7) - tbuf = wbuf + ((i + 1) * SPACE2_DIM2); - else - tbuf = wbuf + ((i + 15) * SPACE2_DIM2); - tbuf2 = rbuf + (i * SPACE3_DIM2); - for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Test case of two blocks which overlap corners and must be split */ - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 10x10 hyperslab for memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union overlapping 15x20 hyperslab for memory dataset (forming a irregularly shaped region) */ - start[0] = 8; - start[1] = 5; - stride[0] = 1; - stride[1] = 1; - count[0] = 20; - count[1] = 15; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE5_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - 
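The "hyperslab caching turned off" variant above differs from the plain disjoint-union case only in that it creates a dataset transfer property list (H5P_DATASET_XFER) and passes it to H5Dwrite/H5Dread instead of H5P_DEFAULT. A hedged sketch of that mechanism; the file and dataset names are invented, and H5Pset_buffer is shown merely as one setting such a list can carry, not as anything the removed test configures:

    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t dims[2] = {10, 10};
        int     wbuf[10][10], rbuf[10][10];
        hid_t   fid, sid, did, dxpl;

        for (int i = 0; i < 10; i++)
            for (int j = 0; j < 10; j++)
                wbuf[i][j] = (i * 10) + j;

        /* "xfer_example.h5" and "ints" are placeholder names for this sketch */
        fid = H5Fcreate("xfer_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        sid = H5Screate_simple(2, dims, NULL);
        did = H5Dcreate2(fid, "ints", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
            return 1;

        /* Dataset transfer property list; here it only enlarges the type-conversion buffer */
        dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_buffer(dxpl, (size_t)(1024 * 1024), NULL, NULL);

        if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0)
            return 1;
        if (rbuf[9][9] != wbuf[9][9])
            return 1;

        H5Pclose(dxpl);
        H5Dclose(did);
        H5Sclose(sid);
        H5Fclose(fid);
        return 0;
    }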
stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0, tbuf2 = rbuf; i < SPACE2_DIM1; i++) { - tbuf = wbuf + (i * SPACE2_DIM2) + begin[i]; - for (j = 0; j < (int)len[i]; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_union() */ - -/**************************************************************** -** -** test_select_hyper_union_stagger(): Test basic H5S (dataspace) selection code. -** Tests unions of staggered hyperslabs. (Uses H5Scombine_hyperslab -** and H5Smodify_select instead of H5Sselect_hyperslab) -** -****************************************************************/ -static void -test_select_hyper_union_stagger(void) -{ - hid_t file_id; /* File ID */ - hid_t dset_id; /* Dataset ID */ - hid_t dataspace; /* File dataspace ID */ - hid_t memspace; /* Memory dataspace ID */ - hid_t tmp_space; /* Temporary dataspace ID */ - hid_t tmp2_space; /* Another temporary dataspace ID */ - hsize_t dimsm[2] = {7, 7}; /* Memory array dimensions */ - hsize_t dimsf[2] = {6, 5}; /* File array dimensions */ - hsize_t count[2] = {3, 1}; /* 1st Hyperslab size */ - hsize_t count2[2] = {3, 1}; /* 2nd Hyperslab size */ - hsize_t count3[2] = {2, 1}; /* 3rd Hyperslab size */ - hsize_t start[2] = {0, 0}; /* 1st Hyperslab offset */ - hsize_t start2[2] = {2, 1}; /* 2nd Hyperslab offset */ - hsize_t start3[2] = {4, 2}; /* 3rd Hyperslab offset */ - hsize_t count_out[2] = {4, 2}; /* Hyperslab size in memory */ - hsize_t start_out[2] = {0, 3}; /* Hyperslab offset in memory */ - int data[6][5]; /* Data to write */ - int data_out[7][7]; /* Data read in */ - int input_loc[8][2] = {{0, 0}, {1, 0}, {2, 0}, {2, 1}, {3, 1}, {4, 1}, {4, 2}, {5, 2}}; - int output_loc[8][2] = {{0, 3}, {0, 4}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 3}, {3, 4}}; - int dsetrank = 2; /* File Dataset rank */ - int memrank = 2; /* Memory Dataset rank */ - int i, j; /* Local counting variables */ - herr_t error; - hsize_t stride[2] = {1, 1}; - hsize_t block[2] = {1, 1}; - - /* Initialize data to write */ - for (i = 0; i < 6; i++) - for (j = 0; j < 5; j++) - data[i][j] = j * 10 + i; - - /* Create file */ - file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Create File Dataspace */ - dataspace = H5Screate_simple(dsetrank, dimsf, NULL); - CHECK(dataspace, FAIL, "H5Screate_simple"); - - /* Create File Dataset */ - dset_id = - H5Dcreate2(file_id, "IntArray", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - /* Write File Dataset */ - error = H5Dwrite(dset_id, H5T_NATIVE_INT, dataspace, dataspace, H5P_DEFAULT, data);
- CHECK(error, FAIL, "H5Dwrite"); - - /* Close things */ - error = H5Sclose(dataspace); - CHECK(error, FAIL, "H5Sclose"); - error = H5Dclose(dset_id); - CHECK(error, FAIL, "H5Dclose"); - error = H5Fclose(file_id); - CHECK(error, FAIL, "H5Fclose"); - - /* Initialize input buffer */ - memset(data_out, 0, 7 * 7 * sizeof(int)); - - /* Open file */ - file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - - /* Open dataset */ - dset_id = H5Dopen2(file_id, "IntArray", H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Get the dataspace */ - dataspace = H5Dget_space(dset_id); - CHECK(dataspace, FAIL, "H5Dget_space"); - - /* Select the hyperslabs */ - error = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - tmp_space = H5Scombine_hyperslab(dataspace, H5S_SELECT_OR, start2, stride, count2, block); - CHECK(tmp_space, FAIL, "H5Scombine_hyperslab"); - - /* Copy the file dataspace and select hyperslab */ - tmp2_space = H5Scopy(dataspace); - CHECK(tmp2_space, FAIL, "H5Scopy"); - error = H5Sselect_hyperslab(tmp2_space, H5S_SELECT_SET, start3, stride, count3, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Combine the copied dataspace with the temporary dataspace */ - error = H5Smodify_select(tmp_space, H5S_SELECT_OR, tmp2_space); - CHECK(error, FAIL, "H5Smodify_select"); - - /* Create Memory Dataspace */ - memspace = H5Screate_simple(memrank, dimsm, NULL); - CHECK(memspace, FAIL, "H5Screate_simple"); - - /* Select hyperslab in memory */ - error = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start_out, stride, count_out, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Read File Dataset */ - error = H5Dread(dset_id, H5T_NATIVE_INT, memspace, tmp_space, H5P_DEFAULT, data_out); - CHECK(error, FAIL, "H5Dread"); - - /* Verify input data */ - for (i = 0; i < 8; i++) { - if (data[input_loc[i][0]][input_loc[i][1]] != data_out[output_loc[i][0]][output_loc[i][1]]) { - printf("input data #%d is wrong!\n", i); - printf("input_loc=[%d][%d]\n", input_loc[i][0], input_loc[i][1]); - printf("output_loc=[%d][%d]\n", output_loc[i][0], output_loc[i][1]); - printf("data=%d\n", data[input_loc[i][0]][input_loc[i][1]]); - TestErrPrintf("data_out=%d\n", data_out[output_loc[i][0]][output_loc[i][1]]); - } /* end if */ - } /* end for */ - - /* Close things */ - error = H5Sclose(tmp2_space); - CHECK(error, FAIL, "H5Sclose"); - error = H5Sclose(tmp_space); - CHECK(error, FAIL, "H5Sclose"); - error = H5Sclose(dataspace); - CHECK(error, FAIL, "H5Sclose"); - error = H5Sclose(memspace); - CHECK(error, FAIL, "H5Sclose"); - error = H5Dclose(dset_id); - CHECK(error, FAIL, "H5Dclose"); - error = H5Fclose(file_id); - CHECK(error, FAIL, "H5Fclose"); -} - -/**************************************************************** -** -** test_select_hyper_union_3d(): Test basic H5S (dataspace) selection code. 
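test_select_hyper_union_stagger() above exercises the same kind of union through H5Scombine_hyperslab, which leaves its input dataspace untouched and returns a new one, and H5Smodify_select, which edits an existing selection in place. A stripped-down, illustrative sketch of just that call pattern, reusing the offsets and counts from the removed test; the 3-D test further below does the analogous thing with H5Scombine_select, which returns a third dataspace instead of modifying either input:

    #include "hdf5.h"
    #include <stdio.h>

    int
    main(void)
    {
        hsize_t dims[2]   = {6, 5};
        hsize_t start1[2] = {0, 0}, count1[2] = {3, 1};
        hsize_t start2[2] = {2, 1}, count2[2] = {3, 1};
        hsize_t start3[2] = {4, 2}, count3[2] = {2, 1};
        hid_t   base, combined, third;

        base = H5Screate_simple(2, dims, NULL);
        H5Sselect_hyperslab(base, H5S_SELECT_SET, start1, NULL, count1, NULL);

        /* Returns a new dataspace whose selection is (base's block OR the second block);
         * 'base' itself is not modified */
        combined = H5Scombine_hyperslab(base, H5S_SELECT_OR, start2, NULL, count2, NULL);

        /* Build the third block in a copy, then fold it into 'combined' in place */
        third = H5Scopy(base);
        H5Sselect_hyperslab(third, H5S_SELECT_SET, start3, NULL, count3, NULL);
        H5Smodify_select(combined, H5S_SELECT_OR, third);

        /* Three staggered one-column blocks of 3 + 3 + 2 elements */
        printf("%lld elements selected\n", (long long)H5Sget_select_npoints(combined));

        H5Sclose(third);
        H5Sclose(combined);
        H5Sclose(base);
        return 0;
    }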
-** Tests unions of hyperslabs in 3-D (Uses H5Scombine_hyperslab -** and H5Scombine_select instead of H5Sselect_hyperslab) -** -****************************************************************/ -static void -test_select_hyper_union_3d(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hid_t tmp_space; /* Temporary Dataspace ID */ - hid_t tmp2_space; /* Another temporary Dataspace ID */ - hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; - hsize_t dims2[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3}; - hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2}; - hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */ - struct row_list { - size_t z; - size_t y; - size_t x; - size_t l; - } rows[] = { - /* Array of x,y,z coordinates & length for each row written from memory */ - {0, 0, 0, 6}, /* 1st face of 3-D object */ - {0, 1, 0, 6}, {0, 2, 0, 6}, {0, 3, 0, 6}, {0, 4, 0, 6}, {1, 0, 0, 6}, /* 2nd face of 3-D object */ - {1, 1, 0, 6}, {1, 2, 0, 6}, {1, 3, 0, 6}, {1, 4, 0, 6}, {2, 0, 0, 6}, /* 3rd face of 3-D object */ - {2, 1, 0, 10}, {2, 2, 0, 10}, {2, 3, 0, 10}, {2, 4, 0, 10}, {2, 5, 2, 8}, - {2, 6, 2, 8}, {3, 0, 0, 6}, /* 4th face of 3-D object */ - {3, 1, 0, 10}, {3, 2, 0, 10}, {3, 3, 0, 10}, {3, 4, 0, 10}, {3, 5, 2, 8}, - {3, 6, 2, 8}, {4, 0, 0, 6}, /* 5th face of 3-D object */ - {4, 1, 0, 10}, {4, 2, 0, 10}, {4, 3, 0, 10}, {4, 4, 0, 10}, {4, 5, 2, 8}, - {4, 6, 2, 8}, {5, 1, 2, 8}, /* 6th face of 3-D object */ - {5, 2, 2, 8}, {5, 3, 2, 8}, {5, 4, 2, 8}, {5, 5, 2, 8}, {5, 6, 2, 8}, - {6, 1, 2, 8}, /* 7th face of 3-D object */ - {6, 2, 2, 8}, {6, 3, 2, 8}, {6, 4, 2, 8}, {6, 5, 2, 8}, {6, 6, 2, 8}, - {7, 1, 2, 8}, /* 8th face of 3-D object */ - {7, 2, 2, 8}, {7, 3, 2, 8}, {7, 4, 2, 8}, {7, 5, 2, 8}, {7, 6, 2, 8}}; - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j, k; /* Counters */ - herr_t ret; /* Generic return value */ - hsize_t npoints; /* Number of elements in selection */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of 3-D hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), SPACE3_DIM1 * SPACE3_DIM2); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE4_DIM1; i++) - for (j = 0; j < SPACE4_DIM2; j++) - for (k = 0; k < SPACE4_DIM3; k++) - *tbuf++ = (uint8_t)((((i * SPACE4_DIM2) + j) * SPACE4_DIM3) + k); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Test case of two blocks which overlap corners and must be split */ - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE4_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 2x15x13 hyperslab for disk dataset */ - start[0] = 1; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 2; - 
count[1] = 15; - count[2] = 13; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select 5x5x6 hyperslab for memory dataset */ - start[0] = 0; - start[1] = 0; - start[2] = 0; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 5; - count[1] = 5; - count[2] = 6; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Union overlapping 6x6x8 hyperslab for memory dataset (forming an irregularly shaped region) */ - start[0] = 2; - start[1] = 1; - start[2] = 2; - stride[0] = 1; - stride[1] = 1; - stride[2] = 1; - count[0] = 6; - count[1] = 6; - count[2] = 8; - block[0] = 1; - block[1] = 1; - block[2] = 1; - tmp_space = H5Scombine_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(tmp_space, FAIL, "H5Scombine_hyperslab"); - - /* Combine dataspaces and create new dataspace */ - tmp2_space = H5Scombine_select(sid2, H5S_SELECT_OR, tmp_space); - CHECK(tmp2_space, FAIL, "H5Scombine_select"); - - npoints = (hsize_t)H5Sget_select_npoints(tmp2_space); - VERIFY(npoints, 15 * 26, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, tmp2_space, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close temporary dataspaces */ - ret = H5Sclose(tmp_space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(tmp2_space); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 15x26 hyperslab for reading memory dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 15; - count[1] = 26; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0, tbuf2 = rbuf; i < (int)(sizeof(rows) / sizeof(struct row_list)); i++) { - tbuf = wbuf + (rows[i].z * SPACE4_DIM3 * SPACE4_DIM2) + (rows[i].y * SPACE4_DIM3) + rows[i].x; - for (j = 0; j < (int)rows[i].l; j++, tbuf++, tbuf2++) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - } /* end for */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_union_3d() */ - -/**************************************************************** -** -** test_select_hyper_valid_combination(): Tests invalid and valid -** combinations of selections on dataspace for
H5Scombine_select -** and H5Smodify_select. -** -****************************************************************/ -static void -test_select_hyper_valid_combination(void) -{ - hid_t single_pt_sid; /* Dataspace ID with single point selection */ - hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */ - hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */ - hid_t non_existent_sid = -1; /* A non-existent space id */ - hid_t tmp_sid; /* Temporary dataspace ID */ - hsize_t dims2D[] = {SPACE9_DIM1, SPACE9_DIM2}; - hsize_t dims3D[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3}; - - hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */ - hsize_t start[SPACE4_RANK]; /* Hyperslab start */ - hsize_t stride[SPACE4_RANK]; /* Hyperslab stride */ - hsize_t count[SPACE4_RANK]; /* Hyperslab block count */ - hsize_t block[SPACE4_RANK]; /* Hyperslab block size */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Selection Combination Validity\n")); - assert(SPACE9_DIM2 >= POINT1_NPOINTS); - - /* Create dataspace for single point selection */ - single_pt_sid = H5Screate_simple(SPACE9_RANK, dims2D, NULL); - CHECK(single_pt_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for multiple point selection */ - coord1[0][0] = 2; - coord1[0][1] = 2; - ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create dataspace for single hyperslab selection */ - single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims2D, NULL); - CHECK(single_hyper_sid, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for single hyperslab selection */ - start[0] = 1; - start[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = (SPACE9_DIM1 - 2); - block[1] = (SPACE9_DIM2 - 2); - ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for regular hyperslab selection */ - regular_hyper_sid = H5Screate_simple(SPACE4_RANK, dims3D, NULL); - CHECK(regular_hyper_sid, FAIL, "H5Screate_simple"); - - /* Select regular, strided hyperslab selection */ - start[0] = 2; - start[1] = 2; - start[2] = 2; - stride[0] = 2; - stride[1] = 2; - stride[2] = 2; - count[0] = 5; - count[1] = 2; - count[2] = 5; - block[0] = 1; - block[1] = 1; - block[2] = 1; - ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Test all the selections created */ - - /* Test the invalid combinations between point and hyperslab */ - H5E_BEGIN_TRY - { - tmp_sid = H5Scombine_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - - H5E_BEGIN_TRY - { - tmp_sid = H5Smodify_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Smodify_select"); - - /* Test the invalid combination between two hyperslab but of different dimension size */ - H5E_BEGIN_TRY - { - tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - - H5E_BEGIN_TRY - { - tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Smodify_select"); - - /* Test invalid operation inputs to the two 
functions */ - H5E_BEGIN_TRY - { - tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - - H5E_BEGIN_TRY - { - tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Smodify_select"); - - /* Test inputs in case of non-existent space ids */ - H5E_BEGIN_TRY - { - tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Scombine_select"); - - H5E_BEGIN_TRY - { - tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid); - } - H5E_END_TRY - VERIFY(tmp_sid, FAIL, "H5Smodify_select"); - - /* Close dataspaces */ - ret = H5Sclose(single_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(regular_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_select_hyper_valid_combination() */ - -/**************************************************************** -** -** test_select_hyper_and_2d(): Test basic H5S (dataspace) selection code. -** Tests 'and' of hyperslabs in 2-D -** -****************************************************************/ -static void -test_select_hyper_and_2d(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims2[] = {SPACE2A_DIM1}; - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - hssize_t npoints; /* Number of elements in selection */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with intersection of 2-D hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Intersect overlapping 10x10 hyperslab */ - start[0] = 5; - start[1] = 5; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_AND, 
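The invalid-combination checks above use the public H5E_BEGIN_TRY / H5E_END_TRY macros to silence the error stack while deliberately calling H5Scombine_select with operands the library rejects; the removed test expects, for example, a point selection combined with a hyperslab selection to fail. A minimal, illustrative sketch of that expected-failure idiom, with arbitrary extents and coordinates:

    #include "hdf5.h"
    #include <stdio.h>

    int
    main(void)
    {
        hsize_t dims[2]     = {10, 10};
        hsize_t coord[1][2] = {{2, 2}};
        hsize_t start[2]    = {1, 1}, count[2] = {4, 4};
        hid_t   point_sid, hyper_sid, result;

        point_sid = H5Screate_simple(2, dims, NULL);
        hyper_sid = H5Screate_simple(2, dims, NULL);
        H5Sselect_elements(point_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord);
        H5Sselect_hyperslab(hyper_sid, H5S_SELECT_SET, start, NULL, count, NULL);

        /* The error stack would normally be printed; the TRY block suppresses it
         * because the failure is the expected outcome */
        H5E_BEGIN_TRY
        {
            result = H5Scombine_select(point_sid, H5S_SELECT_AND, hyper_sid);
        }
        H5E_END_TRY
        printf("combine %s\n", result < 0 ? "failed as expected" : "unexpectedly succeeded");

        if (result >= 0)
            H5Sclose(result);
        H5Sclose(hyper_sid);
        H5Sclose(point_sid);
        return 0;
    }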
start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid1); - VERIFY(npoints, 5 * 5, "H5Sget_select_npoints"); - - /* Select 25 hyperslab for memory dataset */ - start[0] = 0; - stride[0] = 1; - count[0] = 25; - block[0] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 5 * 5, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read entire dataset from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Initialize write buffer */ - for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { - if ((i >= 5 && i <= 9) && (j >= 5 && j <= 9)) { - if (*tbuf != *tbuf2) - printf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__, - i, j, (int)*tbuf, (int)*tbuf2); - tbuf2++; - } /* end if */ - else { - if (*tbuf != 0) - printf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j, - (int)*tbuf); - } /* end else */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_and_2d() */ - -/**************************************************************** -** -** test_select_hyper_xor_2d(): Test basic H5S (dataspace) selection code. 
-** Tests 'xor' of hyperslabs in 2-D -** -****************************************************************/ -static void -test_select_hyper_xor_2d(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims2[] = {SPACE2A_DIM1}; - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - hssize_t npoints; /* Number of elements in selection */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with XOR of 2-D hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* XOR with overlapping 10x10 hyperslab */ - start[0] = 5; - start[1] = 5; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_XOR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid1); - VERIFY(npoints, 150, "H5Sget_select_npoints"); - - /* Select 150 hyperslab for memory dataset */ - start[0] = 0; - stride[0] = 1; - count[0] = 150; - block[0] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 150, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read entire dataset from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read with data written out */ - for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { - if (((i >= 0 && i <= 4) && (j >= 0 && j <= 9)) || - ((i >= 5 &&
i <= 9) && ((j >= 0 && j <= 4) || (j >= 10 && j <= 14))) || - ((i >= 10 && i <= 14) && (j >= 5 && j <= 14))) { - if (*tbuf != *tbuf2) - printf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__, - i, j, (int)*tbuf, (int)*tbuf2); - tbuf2++; - } /* end if */ - else { - if (*tbuf != 0) - printf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j, - (int)*tbuf); - } /* end else */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_xor_2d() */ - -/**************************************************************** -** -** test_select_hyper_notb_2d(): Test basic H5S (dataspace) selection code. -** Tests 'notb' of hyperslabs in 2-D -** -****************************************************************/ -static void -test_select_hyper_notb_2d(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims2[] = {SPACE2A_DIM1}; - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - hssize_t npoints; /* Number of elements in selection */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTB of 2-D hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Intersect overlapping 10x10 hyperslab */ - start[0] = 5; - start[1] = 5; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_NOTB, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid1); - 
VERIFY(npoints, 75, "H5Sget_select_npoints"); - - /* Select 75 hyperslab for memory dataset */ - start[0] = 0; - stride[0] = 1; - count[0] = 75; - block[0] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 75, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read entire dataset from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Initialize write buffer */ - for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { - if (((i >= 0 && i <= 4) && (j >= 0 && j <= 9)) || ((i >= 5 && i <= 9) && (j >= 0 && j <= 4))) { - if (*tbuf != *tbuf2) - printf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__, - i, j, (int)*tbuf, (int)*tbuf2); - tbuf2++; - } /* end if */ - else { - if (*tbuf != 0) - printf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j, - (int)*tbuf); - } /* end else */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_notb_2d() */ - -/**************************************************************** -** -** test_select_hyper_nota_2d(): Test basic H5S (dataspace) selection code. 
-** Tests 'nota' of hyperslabs in 2-D -** -****************************************************************/ -static void -test_select_hyper_nota_2d(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2}; - hsize_t dims2[] = {SPACE2A_DIM1}; - hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */ - hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */ - hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */ - hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */ - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf, /* temporary buffer pointer */ - *tbuf2; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - hssize_t npoints; /* Number of elements in selection */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTA of 2-D hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for disk dataset */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Intersect overlapping 10x10 hyperslab */ - start[0] = 5; - start[1] = 5; - stride[0] = 1; - stride[1] = 1; - count[0] = 10; - count[1] = 10; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_NOTA, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid1); - VERIFY(npoints, 75, "H5Sget_select_npoints"); - - /* Select 75 hyperslab for memory dataset */ - start[0] = 0; - stride[0] = 1; - count[0] = 75; - block[0] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints = H5Sget_select_npoints(sid2); - VERIFY(npoints, 75, "H5Sget_select_npoints"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read entire dataset from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Initialize write buffer */ - for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++) - for (j = 0; j < SPACE2_DIM2; j++, tbuf++) { - if (((i >= 10 && i <= 14) && (j >= 5 && j <= 14)) || - ((i >= 
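The AND, XOR, NOTB and NOTA tests above all start from the same two overlapping 10x10 blocks and differ only in the operator handed to the second H5Sselect_hyperslab call; the element counts they verify are 25, 150, 75 and 75 respectively, since the blocks share a 5x5 corner. A compact sketch that reproduces just that bookkeeping, with a 30x26 extent standing in for the tests' SPACE2 dimensions:

    #include "hdf5.h"
    #include <stdio.h>

    int
    main(void)
    {
        /* Two 10x10 blocks that overlap in a 5x5 square, on a 30x26 grid */
        hsize_t       dims[2]    = {30, 26};
        hsize_t       start_a[2] = {0, 0}, start_b[2] = {5, 5};
        hsize_t       count[2]   = {10, 10};
        H5S_seloper_t ops[4]     = {H5S_SELECT_AND, H5S_SELECT_XOR, H5S_SELECT_NOTB, H5S_SELECT_NOTA};
        const char   *names[4]   = {"AND", "XOR", "NOTB", "NOTA"};

        for (int i = 0; i < 4; i++) {
            hid_t sid = H5Screate_simple(2, dims, NULL);

            H5Sselect_hyperslab(sid, H5S_SELECT_SET, start_a, NULL, count, NULL);
            H5Sselect_hyperslab(sid, ops[i], start_b, NULL, count, NULL);

            /* Expected: AND = 25, XOR = 150, NOTB = 75, NOTA = 75 */
            printf("%-4s -> %lld elements\n", names[i], (long long)H5Sget_select_npoints(sid));
            H5Sclose(sid);
        }
        return 0;
    }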
5 && i <= 9) && (j >= 10 && j <= 14))) { - if (*tbuf != *tbuf2) - TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", - __LINE__, i, j, (int)*tbuf, (int)*tbuf2); - tbuf2++; - } /* end if */ - else { - if (*tbuf != 0) - TestErrPrintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, - i, j, (int)*tbuf); - } /* end else */ - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_nota_2d() */ - -/**************************************************************** -** -** test_select_hyper_iter2(): Iterator for checking hyperslab iteration -** -****************************************************************/ -static herr_t -test_select_hyper_iter2(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndim, const hsize_t *point, - void *_operator_data) -{ - int *tbuf = (int *)_elem, /* temporary buffer pointer */ - **tbuf2 = (int **)_operator_data; /* temporary buffer handle */ - unsigned u; /* Local counting variable */ - - if (*tbuf != **tbuf2) { - TestErrPrintf("Error in hyperslab iteration!\n"); - printf("location: { "); - for (u = 0; u < ndim; u++) { - printf("%2d", (int)point[u]); - if (u < (ndim - 1)) - printf(", "); - } /* end for */ - printf("}\n"); - printf("*tbuf=%d, **tbuf2=%d\n", *tbuf, **tbuf2); - return (-1); - } /* end if */ - else { - (*tbuf2)++; - return (0); - } -} /* end test_select_hyper_iter2() */ - -/**************************************************************** -** -** test_select_hyper_union_random_5d(): Test basic H5S (dataspace) selection code. 
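test_select_hyper_iter2() above is an H5Diterate callback: H5Diterate walks the elements of an application buffer that lie inside a selection and hands each one, together with its coordinates, to the callback, which here advances an expected-value cursor. A minimal, illustrative sketch of that verification idiom on a 1-D buffer (the buffer contents and the selection are invented for the example):

    #include "hdf5.h"
    #include <stdio.h>

    /* Callback: compare each selected element against the next expected value */
    static herr_t
    check_elem(void *_elem, hid_t type_id, unsigned ndim, const hsize_t *point, void *_op_data)
    {
        int  *elem     = (int *)_elem;
        int **expected = (int **)_op_data;

        (void)type_id;
        (void)ndim;
        (void)point;
        if (*elem != **expected)
            return -1; /* a negative return stops the iteration */
        (*expected)++;
        return 0;
    }

    int
    main(void)
    {
        hsize_t dims[1]  = {10};
        hsize_t start[1] = {2}, count[1] = {5};
        int     buf[10], *cursor;
        hid_t   sid = H5Screate_simple(1, dims, NULL);

        for (int i = 0; i < 10; i++)
            buf[i] = i;
        H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL);

        /* Walk elements 2..6 of buf in selection order */
        cursor = &buf[2];
        if (H5Diterate(buf, H5T_NATIVE_INT, sid, check_elem, &cursor) < 0)
            printf("mismatch found\n");
        else
            printf("all selected elements matched\n");

        H5Sclose(sid);
        return 0;
    }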
-** Tests random unions of 5-D hyperslabs -** -****************************************************************/ -static void -test_select_hyper_union_random_5d(hid_t read_plist) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE5_DIM1, SPACE5_DIM2, SPACE5_DIM3, SPACE5_DIM4, SPACE5_DIM5}; - hsize_t dims2[] = {SPACE6_DIM1}; - hsize_t start[SPACE5_RANK]; /* Starting location of hyperslab */ - hsize_t count[SPACE5_RANK]; /* Element count of hyperslab */ - int *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j, k, l, m; /* Counters */ - herr_t ret; /* Generic return value */ - hssize_t npoints, /* Number of elements in file selection */ - npoints2; /* Number of elements in memory selection */ - unsigned seed; /* Random number seed for each test */ - unsigned test_num; /* Count of tests being executed */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab Selection Functions with random unions of 5-D hyperslabs\n")); - - /* Allocate write & read buffers */ - wbuf = (int *)malloc(sizeof(int) * SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5); - CHECK_PTR(wbuf, "malloc"); - rbuf = (int *)calloc(sizeof(int), - (size_t)(SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5)); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE5_DIM1; i++) - for (j = 0; j < SPACE5_DIM2; j++) - for (k = 0; k < SPACE5_DIM3; k++) - for (l = 0; l < SPACE5_DIM4; l++) - for (m = 0; m < SPACE5_DIM5; m++) - *tbuf++ = (int)(((((((i * SPACE5_DIM2) + j) * SPACE5_DIM3) + k) * SPACE5_DIM4) + l) * - SPACE5_DIM5) + - m; - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE5_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, SPACE5_NAME, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write entire dataset to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Create dataspace for reading buffer */ - sid2 = H5Screate_simple(SPACE6_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Get initial random # seed */ - seed = (unsigned)HDtime(NULL) + (unsigned)HDclock(); - - /* Crunch through a bunch of random hyperslab reads from the file dataset */ - for (test_num = 0; test_num < NRAND_HYPER; test_num++) { - /* Save random # seed for later use */ - /* (Used in case of errors, to regenerate the hyperslab sequence) */ - seed += (unsigned)HDclock(); - HDsrandom(seed); - - for (i = 0; i < NHYPERSLABS; i++) { - /* Select random hyperslab location & size for selection */ - for (j = 0; j < SPACE5_RANK; j++) { - start[j] = ((hsize_t)HDrandom() % dims1[j]); - count[j] = (((hsize_t)HDrandom() % (dims1[j] - start[j])) + 1); - } /* end for */ - - /* Select hyperslab */ - ret = H5Sselect_hyperslab(sid1, (i == 0 ? 
H5S_SELECT_SET : H5S_SELECT_OR), start, NULL, count, - NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - if (ret < 0) { - TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed); - break; - } /* end if */ - } /* end for */ - - /* Get the number of elements selected */ - npoints = H5Sget_select_npoints(sid1); - CHECK(npoints, 0, "H5Sget_select_npoints"); - - /* Select linear 1-D hyperslab for memory dataset */ - start[0] = 0; - count[0] = (hsize_t)npoints; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - npoints2 = H5Sget_select_npoints(sid2); - VERIFY(npoints, npoints2, "H5Sget_select_npoints"); - - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_NATIVE_INT, sid2, sid1, read_plist, rbuf); - CHECK(ret, FAIL, "H5Dread"); - if (ret < 0) { - TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed); - break; - } /* end if */ - - /* Compare data read with data written out */ - tbuf = rbuf; - ret = H5Diterate(wbuf, H5T_NATIVE_INT, sid1, test_select_hyper_iter2, &tbuf); - if (ret < 0) { - TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed); - break; - } /* end if */ - - /* Set the read buffer back to all zeroes */ - memset(rbuf, 0, (size_t)SPACE6_DIM1); - } /* end for */ - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_union_random_5d() */ - -/**************************************************************** -** -** test_select_hyper_chunk(): Test basic H5S (dataspace) selection code. -** Tests large hyperslab selection in chunked dataset -** -****************************************************************/ -static void -test_select_hyper_chunk(hid_t fapl_plist, hid_t xfer_plist) -{ - hsize_t dimsf[3]; /* dataset dimensions */ - hsize_t chunk_dimsf[3] = {CHUNK_X, CHUNK_Y, CHUNK_Z}; /* chunk sizes */ - short *data; /* data to write */ - short *tmpdata; /* data to write */ - - /* - * Data and output buffer initialization. - */ - hid_t file, dataset; /* handles */ - hid_t dataspace; - hid_t memspace; - hid_t plist; - hsize_t dimsm[3]; /* memory space dimensions */ - hsize_t dims_out[3]; /* dataset dimensions */ - herr_t status; - - short *data_out; /* output buffer */ - short *tmpdata_out; /* output buffer */ - - hsize_t count[3]; /* size of the hyperslab in the file */ - hsize_t offset[3]; /* hyperslab offset in the file */ - hsize_t count_out[3]; /* size of the hyperslab in memory */ - hsize_t offset_out[3]; /* hyperslab offset in memory */ - int i, j, k, status_n, rank; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Hyperslab I/O on Large Chunks\n")); - - /* Allocate the transfer buffers */ - data = (short *)malloc(sizeof(short) * X * Y * Z); - CHECK_PTR(data, "malloc"); - data_out = (short *)calloc((size_t)(NX * NY * NZ), sizeof(short)); - CHECK_PTR(data_out, "calloc"); - - /* - * Data buffer initialization. - */ - tmpdata = data; - for (j = 0; j < X; j++) - for (i = 0; i < Y; i++) - for (k = 0; k < Z; k++) - *tmpdata++ = (short)((k + 1) % 256); - - /* - * Create a new file using H5F_ACC_TRUNC access, - * the default file creation properties, and the default file - * access properties. 
- */ - file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_plist); - CHECK(file, FAIL, "H5Fcreate"); - - /* - * Describe the size of the array and create the dataspace for fixed - * size dataset. - */ - dimsf[0] = X; - dimsf[1] = Y; - dimsf[2] = Z; - dataspace = H5Screate_simple(RANK_F, dimsf, NULL); - CHECK(dataspace, FAIL, "H5Screate_simple"); - - /* - * Create a new dataset within the file using defined dataspace and - * chunking properties. - */ - plist = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist, FAIL, "H5Pcreate"); - status = H5Pset_chunk(plist, RANK_F, chunk_dimsf); - CHECK(status, FAIL, "H5Pset_chunk"); - dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, plist, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* - * Define hyperslab in the dataset. - */ - offset[0] = 0; - offset[1] = 0; - offset[2] = 0; - count[0] = NX_SUB; - count[1] = NY_SUB; - count[2] = NZ_SUB; - status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); - CHECK(status, FAIL, "H5Sselect_hyperslab"); - - /* - * Define the memory dataspace. - */ - dimsm[0] = NX; - dimsm[1] = NY; - dimsm[2] = NZ; - memspace = H5Screate_simple(RANK_M, dimsm, NULL); - CHECK(memspace, FAIL, "H5Screate_simple"); - - /* - * Define memory hyperslab. - */ - offset_out[0] = 0; - offset_out[1] = 0; - offset_out[2] = 0; - count_out[0] = NX_SUB; - count_out[1] = NY_SUB; - count_out[2] = NZ_SUB; - status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL); - CHECK(status, FAIL, "H5Sselect_hyperslab"); - - /* - * Write the data to the dataset using hyperslabs - */ - status = H5Dwrite(dataset, H5T_NATIVE_SHORT, memspace, dataspace, xfer_plist, data); - CHECK(status, FAIL, "H5Dwrite"); - - /* - * Close/release resources. - */ - status = H5Pclose(plist); - CHECK(status, FAIL, "H5Pclose"); - status = H5Sclose(dataspace); - CHECK(status, FAIL, "H5Sclose"); - status = H5Sclose(memspace); - CHECK(status, FAIL, "H5Sclose"); - status = H5Dclose(dataset); - CHECK(status, FAIL, "H5Dclose"); - status = H5Fclose(file); - CHECK(status, FAIL, "H5Fclose"); - - /************************************************************* - - This reads the hyperslab from the test.h5 file just - created, into a 3-dimensional plane of the 3-dimensional - array. - - ************************************************************/ - - /* - * Open the file and the dataset. - */ - file = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl_plist); - CHECK(file, FAIL, "H5Fopen"); - dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - dataspace = H5Dget_space(dataset); /* dataspace handle */ - CHECK(dataspace, FAIL, "H5Dget_space"); - rank = H5Sget_simple_extent_ndims(dataspace); - VERIFY(rank, 3, "H5Sget_simple_extent_ndims"); - status_n = H5Sget_simple_extent_dims(dataspace, dims_out, NULL); - CHECK(status_n, FAIL, "H5Sget_simple_extent_dims"); - VERIFY(dims_out[0], dimsf[0], "Dataset dimensions"); - VERIFY(dims_out[1], dimsf[1], "Dataset dimensions"); - VERIFY(dims_out[2], dimsf[2], "Dataset dimensions"); - - /* - * Define hyperslab in the dataset. - */ - offset[0] = 0; - offset[1] = 0; - offset[2] = 0; - count[0] = NX_SUB; - count[1] = NY_SUB; - count[2] = NZ_SUB; - status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL); - CHECK(status, FAIL, "H5Sselect_hyperslab"); - - /* - * Define the memory dataspace. 
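test_select_hyper_chunk() above combines a chunked dataset-creation property list with hyperslab I/O. A reduced sketch of the same moving parts; the extent, chunk size and file name are invented here, and the real test also reads the data back through a second pair of selections and an explicit memory dataspace:

    #include "hdf5.h"

    int
    main(void)
    {
        hsize_t dims[2]   = {100, 100};
        hsize_t chunk[2]  = {20, 20};
        hsize_t offset[2] = {0, 0}, count[2] = {50, 60};
        static short data[100][100]; /* zero-initialized; contents do not matter for the sketch */
        hid_t   fid, sid, dcpl, did;

        /* "chunked_example.h5" is a made-up name for this illustration */
        fid  = H5Fcreate("chunked_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        sid  = H5Screate_simple(2, dims, NULL);
        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk); /* storage is broken into 20x20 chunks */
        did  = H5Dcreate2(fid, "shorts", H5T_NATIVE_SHORT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        /* Write only a 50x60 corner of the dataset; the same dataspace (extent plus
         * hyperslab) serves as both the memory and the file selection here */
        H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL);
        H5Dwrite(did, H5T_NATIVE_SHORT, sid, sid, H5P_DEFAULT, data);

        H5Dclose(did);
        H5Pclose(dcpl);
        H5Sclose(sid);
        H5Fclose(fid);
        return 0;
    }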
- */ - dimsm[0] = NX; - dimsm[1] = NY; - dimsm[2] = NZ; - memspace = H5Screate_simple(RANK_M, dimsm, NULL); - CHECK(memspace, FAIL, "H5Screate_simple"); - - /* - * Define memory hyperslab. - */ - offset_out[0] = 0; - offset_out[1] = 0; - offset_out[2] = 0; - count_out[0] = NX_SUB; - count_out[1] = NY_SUB; - count_out[2] = NZ_SUB; - status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL); - CHECK(status, FAIL, "H5Sselect_hyperslab"); - - /* - * Read data from hyperslab in the file into the hyperslab in - * memory and display. - */ - status = H5Dread(dataset, H5T_NATIVE_SHORT, memspace, dataspace, xfer_plist, data_out); - CHECK(status, FAIL, "H5Dread"); - - /* Compare data written with data read in */ - tmpdata = data; - tmpdata_out = data_out; - for (j = 0; j < X; j++) - for (i = 0; i < Y; i++) - for (k = 0; k < Z; k++, tmpdata++, tmpdata_out++) { - if (*tmpdata != *tmpdata_out) - TestErrPrintf("Line %d: Error! j=%d, i=%d, k=%d, *tmpdata=%x, *tmpdata_out=%x\n", - __LINE__, j, i, k, (unsigned)*tmpdata, (unsigned)*tmpdata_out); - } /* end for */ - - /* - * Close and release resources. - */ - status = H5Dclose(dataset); - CHECK(status, FAIL, "H5Dclose"); - status = H5Sclose(dataspace); - CHECK(status, FAIL, "H5Sclose"); - status = H5Sclose(memspace); - CHECK(status, FAIL, "H5Sclose"); - status = H5Fclose(file); - CHECK(status, FAIL, "H5Fclose"); - free(data); - free(data_out); -} /* test_select_hyper_chunk() */ - -/**************************************************************** -** -** test_select_point_chunk(): Test basic H5S (dataspace) selection code. -** Tests combinations of hyperslab and point selections on -** chunked datasets. -** -****************************************************************/ -static void -test_select_point_chunk(void) -{ - hsize_t dimsf[SPACE7_RANK]; /* dataset dimensions */ - hsize_t chunk_dimsf[SPACE7_RANK] = {SPACE7_CHUNK_DIM1, SPACE7_CHUNK_DIM2}; /* chunk sizes */ - unsigned *data; /* data to write */ - unsigned *tmpdata; /* data to write */ - - /* - * Data and output buffer initialization. - */ - hid_t file, dataset; /* handles */ - hid_t dataspace; - hid_t pnt1_space; /* Dataspace to hold 1st point selection */ - hid_t pnt2_space; /* Dataspace to hold 2nd point selection */ - hid_t hyp1_space; /* Dataspace to hold 1st hyperslab selection */ - hid_t hyp2_space; /* Dataspace to hold 2nd hyperslab selection */ - hid_t dcpl; - herr_t ret; /* Generic return value */ - - unsigned *data_out; /* output buffer */ - - hsize_t start[SPACE7_RANK]; /* hyperslab offset */ - hsize_t count[SPACE7_RANK]; /* size of the hyperslab */ - hsize_t points[SPACE7_NPOINTS][SPACE7_RANK]; /* points for selection */ - unsigned i, j; /* Local index variables */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Point Selections on Chunked Datasets\n")); - - /* Allocate the transfer buffers */ - data = (unsigned *)malloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(data, "malloc"); - data_out = (unsigned *)calloc((size_t)(SPACE7_DIM1 * SPACE7_DIM2), sizeof(unsigned)); - CHECK_PTR(data_out, "calloc"); - - /* - * Data buffer initialization. - */ - tmpdata = data; - for (i = 0; i < SPACE7_DIM1; i++) - for (j = 0; j < SPACE7_DIM1; j++) - *tmpdata++ = ((i * SPACE7_DIM2) + j) % 256; - - /* - * Create a new file using H5F_ACC_TRUNC access, - * the default file creation properties and file - * access properties. 
- */ - file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create file dataspace */ - dimsf[0] = SPACE7_DIM1; - dimsf[1] = SPACE7_DIM2; - dataspace = H5Screate_simple(SPACE7_RANK, dimsf, NULL); - CHECK(dataspace, FAIL, "H5Screate_simple"); - - /* - * Create a new dataset within the file using defined dataspace and - * chunking properties. - */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - ret = H5Pset_chunk(dcpl, SPACE7_RANK, chunk_dimsf); - CHECK(ret, FAIL, "H5Pset_chunk"); - dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Create 1st point selection */ - pnt1_space = H5Scopy(dataspace); - CHECK(pnt1_space, FAIL, "H5Scopy"); - - points[0][0] = 3; - points[0][1] = 3; - points[1][0] = 3; - points[1][1] = 8; - points[2][0] = 8; - points[2][1] = 3; - points[3][0] = 8; - points[3][1] = 8; - points[4][0] = 1; /* In same chunk as point #0, but "earlier" in chunk */ - points[4][1] = 1; - points[5][0] = 1; /* In same chunk as point #1, but "earlier" in chunk */ - points[5][1] = 6; - points[6][0] = 6; /* In same chunk as point #2, but "earlier" in chunk */ - points[6][1] = 1; - points[7][0] = 6; /* In same chunk as point #3, but "earlier" in chunk */ - points[7][1] = 6; - ret = H5Sselect_elements(pnt1_space, H5S_SELECT_SET, (size_t)SPACE7_NPOINTS, (const hsize_t *)points); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create 1st hyperslab selection */ - hyp1_space = H5Scopy(dataspace); - CHECK(hyp1_space, FAIL, "H5Scopy"); - - start[0] = 2; - start[1] = 2; - count[0] = 4; - count[1] = 2; - ret = H5Sselect_hyperslab(hyp1_space, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write out data using 1st point selection for file & hyperslab for memory */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, hyp1_space, pnt1_space, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Create 2nd point selection */ - pnt2_space = H5Scopy(dataspace); - CHECK(pnt2_space, FAIL, "H5Scopy"); - - points[0][0] = 4; - points[0][1] = 4; - points[1][0] = 4; - points[1][1] = 9; - points[2][0] = 9; - points[2][1] = 4; - points[3][0] = 9; - points[3][1] = 9; - points[4][0] = 2; /* In same chunk as point #0, but "earlier" in chunk */ - points[4][1] = 2; - points[5][0] = 2; /* In same chunk as point #1, but "earlier" in chunk */ - points[5][1] = 7; - points[6][0] = 7; /* In same chunk as point #2, but "earlier" in chunk */ - points[6][1] = 2; - points[7][0] = 7; /* In same chunk as point #3, but "earlier" in chunk */ - points[7][1] = 7; - ret = H5Sselect_elements(pnt2_space, H5S_SELECT_SET, (size_t)SPACE7_NPOINTS, (const hsize_t *)points); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create 2nd hyperslab selection */ - hyp2_space = H5Scopy(dataspace); - CHECK(hyp2_space, FAIL, "H5Scopy"); - - start[0] = 2; - start[1] = 4; - count[0] = 4; - count[1] = 2; - ret = H5Sselect_hyperslab(hyp2_space, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write out data using 2nd hyperslab selection for file & point for memory */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, pnt2_space, hyp2_space, H5P_DEFAULT, data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close everything (except selections) */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Sclose(dataspace); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); 
- ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - /* Re-open file & dataset */ - file = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Read data using 1st point selection for file and hyperslab for memory */ - ret = H5Dread(dataset, H5T_NATIVE_UINT, hyp1_space, pnt1_space, H5P_DEFAULT, data_out); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify data (later) */ - - /* Read data using 2nd hyperslab selection for file and point for memory */ - ret = H5Dread(dataset, H5T_NATIVE_UINT, pnt2_space, hyp2_space, H5P_DEFAULT, data_out); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify data (later) */ - - /* Close everything (including selections) */ - ret = H5Sclose(pnt1_space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(pnt2_space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(hyp1_space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(hyp2_space); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - - free(data); - free(data_out); -} /* test_select_point_chunk() */ - -/**************************************************************** -** -** test_select_scalar_chunk(): Test basic H5S (dataspace) selection code. -** Tests using a scalar dataspace (in memory) to access chunked datasets. -** -****************************************************************/ -static void -test_select_scalar_chunk(void) -{ - hid_t file_id; /* File ID */ - hid_t dcpl; /* Dataset creation property list */ - hid_t dsid; /* Dataset ID */ - hid_t sid; /* Dataspace ID */ - hid_t m_sid; /* Memory dataspace */ - hsize_t dims[] = {2}; /* Dataset dimensions */ - hsize_t maxdims[] = {H5S_UNLIMITED}; /* Dataset maximum dimensions */ - hsize_t offset[] = {0}; /* Hyperslab start */ - hsize_t count[] = {1}; /* Hyperslab count */ - unsigned data = 2; /* Data to write */ - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Scalar Dataspaces and Chunked Datasets\n")); - - file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - dims[0] = 1024U; - ret = H5Pset_chunk(dcpl, 1, dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create 1-D dataspace */ - sid = H5Screate_simple(1, dims, maxdims); - CHECK(sid, FAIL, "H5Screate_simple"); - - dsid = H5Dcreate2(file_id, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dcreate2"); - - /* Select scalar area (offset 0, count 1) */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create scalar memory dataspace */ - m_sid = H5Screate(H5S_SCALAR); - CHECK(m_sid, FAIL, "H5Screate"); - - /* Write out data using scalar dataspace for memory dataspace */ - ret = H5Dwrite(dsid, H5T_NATIVE_UINT, m_sid, sid, H5P_DEFAULT, &data); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close resources */ - ret = H5Sclose(m_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(dsid); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_select_scalar_chunk() */ - -/**************************************************************** -** -** test_select_valid(): Test
basic H5S (dataspace) selection code. -** Tests selection validity -** -****************************************************************/ -static void -test_select_valid(void) -{ - herr_t error; - htri_t valid; - hid_t main_space, sub_space; - hsize_t safe_start[2] = {1, 1}; - hsize_t safe_count[2] = {1, 1}; - hsize_t start[2]; - hsize_t dims[2], maxdims[2], size[2], count[2]; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Selection Validity\n")); - - MESSAGE(8, ("Case 1 : sub_space is not a valid dataspace\n")); - dims[0] = dims[1] = H5S_UNLIMITED; - - H5E_BEGIN_TRY - { - sub_space = H5Screate_simple(2, dims, NULL); - } - H5E_END_TRY - VERIFY(sub_space, FAIL, "H5Screate_simple"); - - H5E_BEGIN_TRY - { - valid = H5Sselect_valid(sub_space); - } - H5E_END_TRY - VERIFY(valid, FAIL, "H5Sselect_valid"); - - /* Set arrays and dataspace for the rest of the cases */ - count[0] = count[1] = 1; - dims[0] = dims[1] = maxdims[0] = maxdims[1] = 10; - - main_space = H5Screate_simple(2, dims, maxdims); - CHECK(main_space, FAIL, "H5Screate_simple"); - - MESSAGE(8, ("Case 2 : sub_space is a valid but closed dataspace\n")); - sub_space = H5Scopy(main_space); - CHECK(sub_space, FAIL, "H5Scopy"); - - error = H5Sclose(sub_space); - CHECK(error, FAIL, "H5Sclose"); - - H5E_BEGIN_TRY - { - valid = H5Sselect_valid(sub_space); - } - H5E_END_TRY - VERIFY(valid, FAIL, "H5Sselect_valid"); - - MESSAGE(8, ("Case 3 : in the dimensions\nTry offset (4,4) and size(6,6), the original space is of size " - "(10,10)\n")); - start[0] = start[1] = 4; - size[0] = size[1] = 6; - - sub_space = H5Scopy(main_space); - CHECK(sub_space, FAIL, "H5Scopy"); - - error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - valid = H5Sselect_valid(sub_space); - VERIFY(valid, true, "H5Sselect_valid"); - - error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - valid = H5Sselect_valid(sub_space); - VERIFY(valid, true, "H5Sselect_valid"); - - error = H5Sclose(sub_space); - CHECK(error, FAIL, "H5Sclose"); - - MESSAGE(8, ("Case 4 : exceed dimensions by 1\nTry offset (5,5) and size(6,6), the original space is of " - "size (10,10)\n")); - start[0] = start[1] = 5; - size[0] = size[1] = 6; - - sub_space = H5Scopy(main_space); - CHECK(sub_space, FAIL, "H5Scopy"); - - error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - valid = H5Sselect_valid(sub_space); - VERIFY(valid, false, "H5Sselect_valid"); - - error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - valid = H5Sselect_valid(sub_space); - VERIFY(valid, false, "H5Sselect_valid"); - - error = H5Sclose(sub_space); - CHECK(error, FAIL, "H5Sclose"); - - MESSAGE(8, ("Case 5 : exceed dimensions by 2\nTry offset (6,6) and size(6,6), the original space is of " - "size (10,10)\n")); - start[0] = start[1] = 6; - size[0] = size[1] = 6; - - sub_space = H5Scopy(main_space); - CHECK(sub_space, FAIL, "H5Scopy"); - - error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - valid = H5Sselect_valid(sub_space); - VERIFY(valid, false, "H5Sselect_valid"); - - error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - valid = 
H5Sselect_valid(sub_space); - VERIFY(valid, false, "H5Sselect_valid"); - - error = H5Sclose(sub_space); - CHECK(error, FAIL, "H5Sclose"); - error = H5Sclose(main_space); - CHECK(error, FAIL, "H5Sclose"); -} /* test_select_valid() */ - -/**************************************************************** -** -** test_select_combine(): Test basic H5S (dataspace) selection code. -** Tests combining "all" and "none" selections with hyperslab -** operations. -** -****************************************************************/ -static void -test_select_combine(void) -{ - hid_t base_id; /* Base dataspace for test */ - hid_t all_id; /* Dataspace for "all" selection */ - hid_t none_id; /* Dataspace for "none" selection */ - hid_t space1; /* Temporary dataspace #1 */ - hsize_t start[SPACE7_RANK]; /* Hyperslab start */ - hsize_t stride[SPACE7_RANK]; /* Hyperslab stride */ - hsize_t count[SPACE7_RANK]; /* Hyperslab count */ - hsize_t block[SPACE7_RANK]; /* Hyperslab block */ - hsize_t dims[SPACE7_RANK] = {SPACE7_DIM1, SPACE7_DIM2}; /* Dimensions of dataspace */ - H5S_sel_type sel_type; /* Selection type */ - hssize_t nblocks; /* Number of hyperslab blocks */ - hsize_t blocks[16][2][SPACE7_RANK]; /* List of blocks */ - herr_t error; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Selection Combinations\n")); - - /* Create dataspace for dataset on disk */ - base_id = H5Screate_simple(SPACE7_RANK, dims, NULL); - CHECK(base_id, FAIL, "H5Screate_simple"); - - /* Copy base dataspace and set selection to "all" */ - all_id = H5Scopy(base_id); - CHECK(all_id, FAIL, "H5Scopy"); - error = H5Sselect_all(all_id); - CHECK(error, FAIL, "H5Sselect_all"); - sel_type = H5Sget_select_type(all_id); - VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); - - /* Copy base dataspace and set selection to "none" */ - none_id = H5Scopy(base_id); - CHECK(none_id, FAIL, "H5Scopy"); - error = H5Sselect_none(none_id); - CHECK(error, FAIL, "H5Sselect_none"); - sel_type = H5Sget_select_type(none_id); - VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); - - /* Copy "all" selection & space */ - space1 = H5Scopy(all_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'OR' "all" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_OR, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that it's still "all" selection */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "all" selection & space */ - space1 = H5Scopy(all_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'AND' "all" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_AND, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the same at the original block */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify that there is only one block */ - nblocks = H5Sget_select_hyper_nblocks(space1); - VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); - - /* Retrieve the block defined */ - memset(blocks, -1, sizeof(blocks)); /* Reset block list */ - error = 
H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); - CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify that the correct block is defined */ - VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "all" selection & space */ - space1 = H5Scopy(all_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'XOR' "all" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_XOR, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is an inversion of the original block */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify that there are two blocks */ - nblocks = H5Sget_select_hyper_nblocks(space1); - VERIFY(nblocks, 2, "H5Sget_select_hyper_nblocks"); - - /* Retrieve the block defined */ - memset(blocks, -1, sizeof(blocks)); /* Reset block list */ - error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); - CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify that the correct block is defined */ - VERIFY(blocks[0][0][0], 0, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][0][1], 5, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][0], 4, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][1], 9, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][0][0], 5, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][0][1], 0, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][1][0], 9, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][1][1], 9, "H5Sget_select_hyper_blocklist"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "all" selection & space */ - space1 = H5Scopy(all_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'NOTB' "all" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTB, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is an inversion of the original block */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify that there are two blocks */ - nblocks = H5Sget_select_hyper_nblocks(space1); - VERIFY(nblocks, 2, "H5Sget_select_hyper_nblocks"); - - /* Retrieve the block defined */ - memset(blocks, -1, sizeof(blocks)); /* Reset block list */ - error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); - CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify that the correct block is defined */ - VERIFY(blocks[0][0][0], 0, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][0][1], 5, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][0], 4, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][1], 9, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][0][0], 5, 
"H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][0][1], 0, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][1][0], 9, "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[1][1][1], 9, "H5Sget_select_hyper_blocklist"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "all" selection & space */ - space1 = H5Scopy(all_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'NOTA' "all" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTA, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the "none" selection */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "none" selection & space */ - space1 = H5Scopy(none_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'OR' "none" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_OR, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the same as the original hyperslab */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify that there is only one block */ - nblocks = H5Sget_select_hyper_nblocks(space1); - VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); - - /* Retrieve the block defined */ - memset(blocks, -1, sizeof(blocks)); /* Reset block list */ - error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); - CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify that the correct block is defined */ - VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "none" selection & space */ - space1 = H5Scopy(none_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'AND' "none" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_AND, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the "none" selection */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "none" selection & space */ - space1 = H5Scopy(none_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'XOR' "none" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_XOR, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the same as the original hyperslab */ - 
sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify that there is only one block */ - nblocks = H5Sget_select_hyper_nblocks(space1); - VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); - - /* Retrieve the block defined */ - memset(blocks, -1, sizeof(blocks)); /* Reset block list */ - error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); - CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify that the correct block is defined */ - VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "none" selection & space */ - space1 = H5Scopy(none_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'NOTB' "none" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTB, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the "none" selection */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Copy "none" selection & space */ - space1 = H5Scopy(none_id); - CHECK(space1, FAIL, "H5Scopy"); - - /* 'NOTA' "none" selection with another hyperslab */ - start[0] = start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 1; - block[0] = block[1] = 5; - error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTA, start, stride, count, block); - CHECK(error, FAIL, "H5Sselect_hyperslab"); - - /* Verify that the new selection is the same as the original hyperslab */ - sel_type = H5Sget_select_type(space1); - VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type"); - - /* Verify that there is only one block */ - nblocks = H5Sget_select_hyper_nblocks(space1); - VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks"); - - /* Retrieve the block defined */ - memset(blocks, -1, sizeof(blocks)); /* Reset block list */ - error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks); - CHECK(error, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify that the correct block is defined */ - VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist"); - VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist"); - - /* Close temporary dataspace */ - error = H5Sclose(space1); - CHECK(error, FAIL, "H5Sclose"); - - /* Close dataspaces */ - error = H5Sclose(base_id); - CHECK(error, FAIL, "H5Sclose"); - - error = H5Sclose(all_id); - CHECK(error, FAIL, "H5Sclose"); - - error = H5Sclose(none_id); - CHECK(error, FAIL, "H5Sclose"); -} /* test_select_combine() */ - -/* - * Typedef for iteration structure used in the fill value tests - */ -typedef struct { - unsigned short fill_value; /* The fill value to check */ - size_t curr_coord; /* Current coordinate to examine */ - hsize_t *coords; /* Pointer to 
selection's coordinates */ -} fill_iter_info; - -/**************************************************************** -** -** test_select_hyper_iter3(): Iterator for checking hyperslab iteration -** -****************************************************************/ -static herr_t -test_select_hyper_iter3(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndim, const hsize_t *point, - void *_operator_data) -{ - unsigned *tbuf = (unsigned *)_elem; /* temporary buffer pointer */ - fill_iter_info *iter_info = - (fill_iter_info *)_operator_data; /* Get the pointer to the iterator information */ - hsize_t *coord_ptr; /* Pointer to the coordinate information for a point*/ - - /* Check value in current buffer location */ - if (*tbuf != iter_info->fill_value) - return (-1); - else { - /* Check number of dimensions */ - if (ndim != SPACE7_RANK) - return (-1); - else { - /* Check Coordinates */ - coord_ptr = iter_info->coords + (2 * iter_info->curr_coord); - iter_info->curr_coord++; - if (coord_ptr[0] != point[0]) - return (-1); - else if (coord_ptr[1] != point[1]) - return (-1); - else - return (0); - } /* end else */ - } /* end else */ -} /* end test_select_hyper_iter3() */ - -/**************************************************************** -** -** test_select_fill_all(): Test basic H5S (dataspace) selection code. -** Tests filling "all" selections -** -****************************************************************/ -static void -test_select_fill_all(void) -{ - hid_t sid1; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - unsigned fill_value; /* Fill value */ - fill_iter_info iter_info; /* Iterator information structure */ - hsize_t points[SPACE7_DIM1 * SPACE7_DIM2][SPACE7_RANK]; /* Coordinates of selection */ - unsigned *wbuf, /* buffer to write to disk */ - *tbuf; /* temporary buffer pointer */ - unsigned u, v; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Filling 'all' Selections\n")); - - /* Allocate memory buffer */ - wbuf = (unsigned *)malloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - - /* Initialize memory buffer */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) - *tbuf++ = (u * SPACE7_DIM2) + v; - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Space defaults to "all" selection */ - - /* Set fill value */ - fill_value = SPACE7_FILL; - - /* Fill selection in memory */ - ret = H5Dfill(&fill_value, H5T_NATIVE_UINT, wbuf, H5T_NATIVE_UINT, sid1); - CHECK(ret, FAIL, "H5Dfill"); - - /* Verify memory buffer the hard way... */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) - if (*tbuf != fill_value) - TestErrPrintf("Error! 
v=%d, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, fill_value); - - /* Set the coordinates of the selection */ - for (u = 0; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) { - points[(u * SPACE7_DIM2) + v][0] = u; - points[(u * SPACE7_DIM2) + v][1] = v; - } /* end for */ - - /* Initialize the iterator structure */ - iter_info.fill_value = SPACE7_FILL; - iter_info.curr_coord = 0; - iter_info.coords = (hsize_t *)points; - - /* Iterate through selection, verifying correct data */ - ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free memory buffers */ - free(wbuf); -} /* test_select_fill_all() */ - -/**************************************************************** -** -** test_select_fill_point(): Test basic H5S (dataspace) selection code. -** Tests filling "point" selections -** -****************************************************************/ -static void -test_select_fill_point(hssize_t *offset) -{ - hid_t sid1; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ - hsize_t points[5][SPACE7_RANK] = {{2, 4}, {3, 8}, {8, 4}, {7, 5}, {7, 7}}; - size_t num_points = 5; /* Number of points selected */ - int fill_value; /* Fill value */ - fill_iter_info iter_info; /* Iterator information structure */ - unsigned *wbuf, /* buffer to write to disk */ - *tbuf; /* temporary buffer pointer */ - unsigned u, v, w; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Filling 'point' Selections\n")); - - /* Allocate memory buffer */ - wbuf = (unsigned *)malloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - - /* Initialize memory buffer */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) - *tbuf++ = (unsigned short)(u * SPACE7_DIM2) + v; - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Select "point" selection */ - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, num_points, (const hsize_t *)points); - CHECK(ret, FAIL, "H5Sselect_elements"); - - if (offset != NULL) { - memcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); - - /* Set offset, if provided */ - ret = H5Soffset_simple(sid1, real_offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - } /* end if */ - else - memset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); - - /* Set fill value */ - fill_value = SPACE7_FILL; - - /* Fill selection in memory */ - ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); - CHECK(ret, FAIL, "H5Dfill"); - - /* Verify memory buffer the hard way... */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { - for (w = 0; w < (unsigned)num_points; w++) { - if (u == (unsigned)(points[w][0] + (hsize_t)real_offset[0]) && - v == (unsigned)(points[w][1] + (hsize_t)real_offset[1])) { - if (*tbuf != (unsigned)fill_value) - TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, - (unsigned)fill_value); - break; - } /* end if */ - } /* end for */ - if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v)) - TestErrPrintf("Error! 
v=%d, u=%d, *tbuf=%u, should be: %u\n", v, u, *tbuf, - ((u * SPACE7_DIM2) + v)); - } /* end for */ - - /* Initialize the iterator structure */ - iter_info.fill_value = SPACE7_FILL; - iter_info.curr_coord = 0; - iter_info.coords = (hsize_t *)points; - - /* Add in the offset */ - for (u = 0; u < (unsigned)num_points; u++) { - points[u][0] = (hsize_t)((hssize_t)points[u][0] + real_offset[0]); - points[u][1] = (hsize_t)((hssize_t)points[u][1] + real_offset[1]); - } /* end for */ - - /* Iterate through selection, verifying correct data */ - ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free memory buffers */ - free(wbuf); -} /* test_select_fill_point() */ - -/**************************************************************** -** -** test_select_fill_hyper_simple(): Test basic H5S (dataspace) selection code. -** Tests filling "simple" (i.e. one block) hyperslab selections -** -****************************************************************/ -static void -test_select_fill_hyper_simple(hssize_t *offset) -{ - hid_t sid1; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ - hsize_t start[SPACE7_RANK]; /* Hyperslab start */ - hsize_t count[SPACE7_RANK]; /* Hyperslab block size */ - size_t num_points; /* Number of points in selection */ - hsize_t points[16][SPACE7_RANK]; /* Coordinates selected */ - int fill_value; /* Fill value */ - fill_iter_info iter_info; /* Iterator information structure */ - unsigned *wbuf, /* buffer to write to disk */ - *tbuf; /* temporary buffer pointer */ - unsigned u, v; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Filling Simple 'hyperslab' Selections\n")); - - /* Allocate memory buffer */ - wbuf = (unsigned *)malloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - - /* Initialize memory buffer */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) - *tbuf++ = (unsigned short)(u * SPACE7_DIM2) + v; - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Select "hyperslab" selection */ - start[0] = 3; - start[1] = 3; - count[0] = 4; - count[1] = 4; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - if (offset != NULL) { - memcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); - - /* Set offset, if provided */ - ret = H5Soffset_simple(sid1, real_offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - } /* end if */ - else - memset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); - - /* Set fill value */ - fill_value = SPACE7_FILL; - - /* Fill selection in memory */ - ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); - CHECK(ret, FAIL, "H5Dfill"); - - /* Verify memory buffer the hard way... */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { - if ((u >= (unsigned)((hssize_t)start[0] + real_offset[0]) && - u < (unsigned)((hssize_t)(start[0] + count[0]) + real_offset[0])) && - (v >= (unsigned)((hssize_t)start[1] + real_offset[1]) && - v < (unsigned)((hssize_t)(start[1] + count[1]) + real_offset[1]))) { - if (*tbuf != (unsigned)fill_value) - TestErrPrintf("Error! 
v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, - (unsigned)fill_value); - } /* end if */ - else { - if (*tbuf != ((unsigned)(u * SPACE7_DIM2) + v)) - TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, - ((u * SPACE7_DIM2) + v)); - } /* end else */ - } /* end for */ - - /* Initialize the iterator structure */ - iter_info.fill_value = SPACE7_FILL; - iter_info.curr_coord = 0; - iter_info.coords = (hsize_t *)points; - - /* Set the coordinates of the selection (with the offset) */ - for (u = 0, num_points = 0; u < (unsigned)count[0]; u++) - for (v = 0; v < (unsigned)count[1]; v++, num_points++) { - points[num_points][0] = (hsize_t)((hssize_t)(u + start[0]) + real_offset[0]); - points[num_points][1] = (hsize_t)((hssize_t)(v + start[1]) + real_offset[1]); - } /* end for */ - - /* Iterate through selection, verifying correct data */ - ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free memory buffers */ - free(wbuf); -} /* test_select_fill_hyper_simple() */ - -/**************************************************************** -** -** test_select_fill_hyper_regular(): Test basic H5S (dataspace) selection code. -** Tests filling "regular" (i.e. strided block) hyperslab selections -** -****************************************************************/ -static void -test_select_fill_hyper_regular(hssize_t *offset) -{ - hid_t sid1; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ - hsize_t start[SPACE7_RANK]; /* Hyperslab start */ - hsize_t stride[SPACE7_RANK]; /* Hyperslab stride size */ - hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ - hsize_t block[SPACE7_RANK]; /* Hyperslab block size */ - hsize_t points[16][SPACE7_RANK] = { - {2, 2}, {2, 3}, {2, 6}, {2, 7}, {3, 2}, {3, 3}, {3, 6}, {3, 7}, - {6, 2}, {6, 3}, {6, 6}, {6, 7}, {7, 2}, {7, 3}, {7, 6}, {7, 7}, - }; - size_t num_points = 16; /* Number of points selected */ - int fill_value; /* Fill value */ - fill_iter_info iter_info; /* Iterator information structure */ - unsigned *wbuf, /* buffer to write to disk */ - *tbuf; /* temporary buffer pointer */ - unsigned u, v, w; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Filling Regular 'hyperslab' Selections\n")); - - /* Allocate memory buffer */ - wbuf = (unsigned *)malloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - - /* Initialize memory buffer */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) - *tbuf++ = (u * SPACE7_DIM2) + v; - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Select "hyperslab" selection */ - start[0] = 2; - start[1] = 2; - stride[0] = 4; - stride[1] = 4; - count[0] = 2; - count[1] = 2; - block[0] = 2; - block[1] = 2; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - if (offset != NULL) { - memcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); - - /* Set offset, if provided */ - ret = H5Soffset_simple(sid1, real_offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - } /* end if */ - else - memset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); - - /* Set fill value */ - 
fill_value = SPACE7_FILL; - - /* Fill selection in memory */ - ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); - CHECK(ret, FAIL, "H5Dfill"); - - /* Verify memory buffer the hard way... */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { - for (w = 0; w < (unsigned)num_points; w++) { - if (u == (unsigned)((hssize_t)points[w][0] + real_offset[0]) && - v == (unsigned)((hssize_t)points[w][1] + real_offset[1])) { - if (*tbuf != (unsigned)fill_value) - TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, - (unsigned)fill_value); - break; - } /* end if */ - } /* end for */ - if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v)) - TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, - ((u * SPACE7_DIM2) + v)); - } /* end for */ - - /* Initialize the iterator structure */ - iter_info.fill_value = SPACE7_FILL; - iter_info.curr_coord = 0; - iter_info.coords = (hsize_t *)points; - - /* Add in the offset */ - for (u = 0; u < (unsigned)num_points; u++) { - points[u][0] = (hsize_t)((hssize_t)points[u][0] + real_offset[0]); - points[u][1] = (hsize_t)((hssize_t)points[u][1] + real_offset[1]); - } /* end for */ - - /* Iterate through selection, verifying correct data */ - ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free memory buffers */ - free(wbuf); -} /* test_select_fill_hyper_regular() */ - -/**************************************************************** -** -** test_select_fill_hyper_irregular(): Test basic H5S (dataspace) selection code. -** Tests filling "irregular" (i.e. combined blocks) hyperslab selections -** -****************************************************************/ -static void -test_select_fill_hyper_irregular(hssize_t *offset) -{ - hid_t sid1; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */ - hsize_t start[SPACE7_RANK]; /* Hyperslab start */ - hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ - hsize_t points[32][SPACE7_RANK] = { - /* Yes, some of them are duplicated..
*/ - {2, 2}, {2, 3}, {2, 4}, {2, 5}, {3, 2}, {3, 3}, {3, 4}, {3, 5}, {4, 2}, {4, 3}, {4, 4}, - {4, 5}, {5, 2}, {5, 3}, {5, 4}, {5, 5}, {4, 4}, {4, 5}, {4, 6}, {4, 7}, {5, 4}, {5, 5}, - {5, 6}, {5, 7}, {6, 4}, {6, 5}, {6, 6}, {6, 7}, {7, 4}, {7, 5}, {7, 6}, {7, 7}, - }; - hsize_t iter_points[28][SPACE7_RANK] = { - /* Coordinates, as iterated through */ - {2, 2}, {2, 3}, {2, 4}, {2, 5}, {3, 2}, {3, 3}, {3, 4}, {3, 5}, {4, 2}, {4, 3}, - {4, 4}, {4, 5}, {4, 6}, {4, 7}, {5, 2}, {5, 3}, {5, 4}, {5, 5}, {5, 6}, {5, 7}, - {6, 4}, {6, 5}, {6, 6}, {6, 7}, {7, 4}, {7, 5}, {7, 6}, {7, 7}, - }; - size_t num_points = 32; /* Number of points selected */ - size_t num_iter_points = 28; /* Number of resulting points */ - int fill_value; /* Fill value */ - fill_iter_info iter_info; /* Iterator information structure */ - unsigned *wbuf, /* buffer to write to disk */ - *tbuf; /* temporary buffer pointer */ - unsigned u, v, w; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Filling Irregular 'hyperslab' Selections\n")); - - /* Allocate memory buffer */ - wbuf = (unsigned *)malloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - - /* Initialize memory buffer */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++) - *tbuf++ = (u * SPACE7_DIM2) + v; - - /* Create dataspace for dataset on disk */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Select first "hyperslab" selection */ - start[0] = 2; - start[1] = 2; - count[0] = 4; - count[1] = 4; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Combine with second "hyperslab" selection */ - start[0] = 4; - start[1] = 4; - count[0] = 4; - count[1] = 4; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_OR, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - if (offset != NULL) { - memcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t)); - - /* Set offset, if provided */ - ret = H5Soffset_simple(sid1, real_offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - } /* end if */ - else - memset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t)); - - /* Set fill value */ - fill_value = SPACE7_FILL; - - /* Fill selection in memory */ - ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1); - CHECK(ret, FAIL, "H5Dfill"); - - /* Verify memory buffer the hard way... */ - for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++) - for (v = 0; v < SPACE7_DIM2; v++, tbuf++) { - for (w = 0; w < (unsigned)num_points; w++) { - if (u == (unsigned)((hssize_t)points[w][0] + real_offset[0]) && - v == (unsigned)((hssize_t)points[w][1] + real_offset[1])) { - if (*tbuf != (unsigned)fill_value) - TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, - (unsigned)fill_value); - break; - } /* end if */ - } /* end for */ - if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v)) - TestErrPrintf("Error! 
v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, - ((u * SPACE7_DIM2) + v)); - } /* end for */ - - /* Initialize the iterator structure */ - iter_info.fill_value = SPACE7_FILL; - iter_info.curr_coord = 0; - iter_info.coords = (hsize_t *)iter_points; - - /* Add in the offset */ - for (u = 0; u < (unsigned)num_iter_points; u++) { - iter_points[u][0] = (hsize_t)((hssize_t)iter_points[u][0] + real_offset[0]); - iter_points[u][1] = (hsize_t)((hssize_t)iter_points[u][1] + real_offset[1]); - } /* end for */ - - /* Iterate through selection, verifying correct data */ - ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info); - CHECK(ret, FAIL, "H5Diterate"); - - /* Close dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Free memory buffers */ - free(wbuf); -} /* test_select_fill_hyper_irregular() */ - -/**************************************************************** -** -** test_select_none(): Test basic H5S (dataspace) selection code. -** Tests I/O on 0-sized point selections -** -****************************************************************/ -static void -test_select_none(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2}; - hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; - uint8_t *wbuf, /* buffer to write to disk */ - *rbuf, /* buffer to read from disk */ - *tbuf; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing I/O on 0-sized Selections\n")); - - /* Allocate write & read buffers */ - wbuf = (uint8_t *)malloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf, "malloc"); - rbuf = (uint8_t *)calloc(sizeof(uint8_t), SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize write buffer */ - for (i = 0, tbuf = wbuf; i < SPACE7_DIM1; i++) - for (j = 0; j < SPACE7_DIM2; j++) - *tbuf++ = (uint8_t)((i * SPACE7_DIM2) + j); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Make "none" selection in both disk and memory datasets */ - ret = H5Sselect_none(sid1); - CHECK(ret, FAIL, "H5Sselect_none"); - - ret = H5Sselect_none(sid2); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Attempt to read "nothing" from disk (before space is allocated) */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Write "nothing" to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Write "nothing" to disk (with a datatype conversion :-) */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Write "nothing" to disk (with NULL buffer argument) */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, NULL); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read "nothing" from disk (with NULL buffer 
argument) */ - ret = H5Dread(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, NULL); - CHECK(ret, FAIL, "H5Dread"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf); - free(rbuf); -} /* test_select_none() */ - -/**************************************************************** -** -** test_scalar_select(): Test basic H5S (dataspace) selection code. -** Tests selections on scalar dataspaces -** -****************************************************************/ -static void -test_scalar_select(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; - hsize_t coord1[SPACE7_RANK]; /* Coordinates for point selection */ - hsize_t start[SPACE7_RANK]; /* Hyperslab start */ - hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ - uint8_t *wbuf_uint8, /* buffer to write to disk */ - rval_uint8, /* value read back in */ - *tbuf_uint8; /* temporary buffer pointer */ - unsigned short *wbuf_ushort, /* another buffer to write to disk */ - rval_ushort, /* value read back in */ - *tbuf_ushort; /* temporary buffer pointer */ - int i, j; /* Counters */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing I/O on Selections in Scalar Dataspaces\n")); - - /* Allocate write & read buffers */ - wbuf_uint8 = (uint8_t *)malloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf_uint8, "malloc"); - wbuf_ushort = (unsigned short *)malloc(sizeof(unsigned short) * SPACE7_DIM1 * SPACE7_DIM2); - CHECK_PTR(wbuf_ushort, "malloc"); - - /* Initialize write buffers */ - for (i = 0, tbuf_uint8 = wbuf_uint8, tbuf_ushort = wbuf_ushort; i < SPACE7_DIM1; i++) - for (j = 0; j < SPACE7_DIM2; j++) { - *tbuf_uint8++ = (uint8_t)((i * SPACE7_DIM2) + j); - *tbuf_ushort++ = (unsigned short)((j * SPACE7_DIM2) + i); - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate(H5S_SCALAR); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Select one element in memory with a point selection */ - coord1[0] = 0; - coord1[1] = 2; - ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Write single point to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid1, sid1, H5P_DEFAULT, &rval_uint8); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_uint8 != *(wbuf_uint8 + 2)) - TestErrPrintf("Error! 
rval=%u, should be: *(wbuf+2)=%u\n", (unsigned)rval_uint8, - (unsigned)*(wbuf_uint8 + 2)); - - /* Write single point to disk (with a datatype conversion) */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid1, sid1, H5P_DEFAULT, &rval_ushort); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_ushort != *(wbuf_ushort + 2)) - TestErrPrintf("Error! rval=%u, should be: *(wbuf+2)=%u\n", (unsigned)rval_ushort, - (unsigned)*(wbuf_ushort + 2)); - - /* Select one element in memory with a hyperslab selection */ - start[0] = 4; - start[1] = 3; - count[0] = 1; - count[1] = 1; - ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write single hyperslab element to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid1, sid1, H5P_DEFAULT, &rval_uint8); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_uint8 != *(wbuf_uint8 + (SPACE7_DIM2 * 4) + 3)) - TestErrPrintf("Error! rval=%u, should be: *(wbuf+(SPACE7_DIM2*4)+3)=%u\n", (unsigned)rval_uint8, - (unsigned)*(wbuf_uint8 + (SPACE7_DIM2 * 4) + 3)); - - /* Write single hyperslab element to disk (with a datatype conversion) */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid1, sid1, H5P_DEFAULT, &rval_ushort); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_ushort != *(wbuf_ushort + (SPACE7_DIM2 * 4) + 3)) - TestErrPrintf("Error! rval=%u, should be: *(wbuf+(SPACE7_DIM2*4)+3)=%u\n", (unsigned)rval_ushort, - (unsigned)*(wbuf_ushort + (SPACE7_DIM2 * 4) + 3)); - - /* Select no elements in memory & file with "none" selections */ - ret = H5Sselect_none(sid1); - CHECK(ret, FAIL, "H5Sselect_none"); - - ret = H5Sselect_none(sid2); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Write no data to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Write no data to disk (with a datatype conversion) */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free memory buffers */ - free(wbuf_uint8); - free(wbuf_ushort); -} /* test_scalar_select() */ - -/**************************************************************** -** -** test_scalar_select2(): Tests selections on scalar dataspace, -** verify H5Sselect_hyperslab and H5Sselect_elements fails for -** scalar dataspace. 
-** -****************************************************************/ -static void -test_scalar_select2(void) -{ - hid_t sid; /* Dataspace ID */ - hsize_t coord1[1]; /* Coordinates for point selection */ - hsize_t start[1]; /* Hyperslab start */ - hsize_t count[1]; /* Hyperslab block count */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Selections in Scalar Dataspaces\n")); - - /* Create dataspace for dataset */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Select one element in memory with a point selection */ - coord1[0] = 0; - H5E_BEGIN_TRY - { - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sselect_elements"); - - /* Select one element in memory with a hyperslab selection */ - start[0] = 0; - count[0] = 0; - H5E_BEGIN_TRY - { - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select no elements in memory & file with "none" selection */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Select all elements in memory & file with "all" selection */ - ret = H5Sselect_all(sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Close disk dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_scalar_select2() */ - -/**************************************************************** -** -** test_scalar_select3(): Test basic H5S (dataspace) selection code. -** Tests selections on scalar dataspaces in memory -** -****************************************************************/ -static void -test_scalar_select3(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1, sid2; /* Dataspace ID */ - hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2}; - hsize_t coord1[SPACE7_RANK]; /* Coordinates for point selection */ - hsize_t start[SPACE7_RANK]; /* Hyperslab start */ - hsize_t count[SPACE7_RANK]; /* Hyperslab block count */ - uint8_t wval_uint8, /* Value written out */ - rval_uint8; /* Value read in */ - unsigned short wval_ushort, /* Another value written out */ - rval_ushort; /* Another value read in */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing I/O on Selections in Scalar Dataspaces in Memory\n")); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid1 = H5Screate_simple(SPACE7_RANK, dims2, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create dataspace for writing buffer */ - sid2 = H5Screate(H5S_SCALAR); - CHECK(sid2, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Select one element in file with a point selection */ - coord1[0] = 0; - coord1[1] = 2; - ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Write single point to disk */ - wval_uint8 = 12; - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - rval_uint8 = 0; - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &rval_uint8); - CHECK(ret, FAIL, 
"H5Dread"); - - /* Check value read back in */ - if (rval_uint8 != wval_uint8) - TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_uint8, - (unsigned)wval_uint8); - - /* Write single point to disk (with a datatype conversion) */ - wval_ushort = 23; - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - rval_ushort = 0; - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &rval_ushort); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_ushort != wval_ushort) - TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_ushort, - (unsigned)wval_ushort); - - /* Select one element in file with a hyperslab selection */ - start[0] = 4; - start[1] = 3; - count[0] = 1; - count[1] = 1; - ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write single hyperslab element to disk */ - wval_uint8 = 92; - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - rval_uint8 = 0; - ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &rval_uint8); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_uint8 != wval_uint8) - TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_uint8, - (unsigned)wval_uint8); - - /* Write single hyperslab element to disk (with a datatype conversion) */ - wval_ushort = 107; - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read scalar element from disk */ - rval_ushort = 0; - ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &rval_ushort); - CHECK(ret, FAIL, "H5Dread"); - - /* Check value read back in */ - if (rval_ushort != wval_ushort) - TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_ushort, - (unsigned)wval_ushort); - - /* Select no elements in memory & file with "none" selections */ - ret = H5Sselect_none(sid1); - CHECK(ret, FAIL, "H5Sselect_none"); - - ret = H5Sselect_none(sid2); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Write no data to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Write no data to disk (with a datatype conversion) */ - ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close memory dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_scalar_select3() */ - -/**************************************************************** -** -** test_shape_same(): Tests selections on dataspace, verify that -** "shape same" routine is working correctly. 
-** -****************************************************************/ -static void -test_shape_same(void) -{ - hid_t all_sid; /* Dataspace ID with "all" selection */ - hid_t none_sid; /* Dataspace ID with "none" selection */ - hid_t single_pt_sid; /* Dataspace ID with single point selection */ - hid_t mult_pt_sid; /* Dataspace ID with multiple point selection */ - hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */ - hid_t single_hyper_all_sid; /* Dataspace ID with single block hyperslab - * selection that is the entire dataspace - */ - hid_t single_hyper_pt_sid; /* Dataspace ID with single block hyperslab - * selection that is the same as the single - * point selection - */ - hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */ - hid_t irreg_hyper_sid; /* Dataspace ID with irregular hyperslab selection */ - hid_t none_hyper_sid; /* Dataspace ID with "no hyperslabs" selection */ - hid_t scalar_all_sid; /* ID for scalar dataspace with "all" selection */ - hid_t scalar_none_sid; /* ID for scalar dataspace with "none" selection */ - hid_t tmp_sid; /* Temporary dataspace ID */ - hsize_t dims[] = {SPACE9_DIM1, SPACE9_DIM2}; - hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */ - hsize_t coord2[SPACE9_DIM2][SPACE9_RANK]; /* Coordinates for multiple point selection */ - hsize_t start[SPACE9_RANK]; /* Hyperslab start */ - hsize_t stride[SPACE9_RANK]; /* Hyperslab stride */ - hsize_t count[SPACE9_RANK]; /* Hyperslab block count */ - hsize_t block[SPACE9_RANK]; /* Hyperslab block size */ - unsigned u, v; /* Local index variables */ - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Same Shape Comparisons\n")); - assert(SPACE9_DIM2 >= POINT1_NPOINTS); - - /* Create dataspace for "all" selection */ - all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(all_sid, FAIL, "H5Screate_simple"); - - /* Select entire extent for dataspace */ - ret = H5Sselect_all(all_sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Create dataspace for "none" selection */ - none_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(none_sid, FAIL, "H5Screate_simple"); - - /* Un-Select entire extent for dataspace */ - ret = H5Sselect_none(none_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Create dataspace for single point selection */ - single_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_pt_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for multiple point selection */ - coord1[0][0] = 2; - coord1[0][1] = 2; - ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create dataspace for multiple point selection */ - mult_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(mult_pt_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for multiple point selection */ - coord2[0][0] = 2; - coord2[0][1] = 2; - coord2[1][0] = 7; - coord2[1][1] = 2; - coord2[2][0] = 1; - coord2[2][1] = 4; - coord2[3][0] = 2; - coord2[3][1] = 6; - coord2[4][0] = 0; - coord2[4][1] = 8; - coord2[5][0] = 3; - coord2[5][1] = 2; - coord2[6][0] = 4; - coord2[6][1] = 4; - coord2[7][0] = 1; - coord2[7][1] = 0; - coord2[8][0] = 5; - coord2[8][1] = 1; - coord2[9][0] = 9; - coord2[9][1] = 3; - ret = H5Sselect_elements(mult_pt_sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); - 
CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create dataspace for single hyperslab selection */ - single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_hyper_sid, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for single hyperslab selection */ - start[0] = 1; - start[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = (SPACE9_DIM1 - 2); - block[1] = (SPACE9_DIM2 - 2); - ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for single hyperslab selection with entire extent selected */ - single_hyper_all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_hyper_all_sid, FAIL, "H5Screate_simple"); - - /* Select entire extent for hyperslab selection */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = SPACE9_DIM1; - block[1] = SPACE9_DIM2; - ret = H5Sselect_hyperslab(single_hyper_all_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for single hyperslab selection with single point selected */ - single_hyper_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_hyper_pt_sid, FAIL, "H5Screate_simple"); - - /* Select entire extent for hyperslab selection */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(single_hyper_pt_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for regular hyperslab selection */ - regular_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(regular_hyper_sid, FAIL, "H5Screate_simple"); - - /* Select regular, strided hyperslab selection */ - start[0] = 2; - start[1] = 2; - stride[0] = 2; - stride[1] = 2; - count[0] = 5; - count[1] = 2; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for irregular hyperslab selection */ - irreg_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(irreg_hyper_sid, FAIL, "H5Screate_simple"); - - /* Create irregular hyperslab selection by OR'ing two blocks together */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 4; - start[1] = 4; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 3; - block[1] = 3; - ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for "no" hyperslab selection */ - none_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(none_hyper_sid, FAIL, "H5Screate_simple"); - - /* Create "no" hyperslab selection by XOR'ing same blocks together */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_XOR, start, stride, count, block); - CHECK(ret, 
FAIL, "H5Sselect_hyperslab"); - - /* Create scalar dataspace for "all" selection */ - scalar_all_sid = H5Screate(H5S_SCALAR); - CHECK(scalar_all_sid, FAIL, "H5Screate"); - - /* Create scalar dataspace for "none" selection */ - scalar_none_sid = H5Screate(H5S_SCALAR); - CHECK(scalar_none_sid, FAIL, "H5Screate"); - - /* Un-Select entire extent for dataspace */ - ret = H5Sselect_none(scalar_none_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Compare "all" selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(all_sid, all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(all_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(all_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(all_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(all_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(all_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(all_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(all_sid, single_hyper_all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(all_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(all_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(all_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(all_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(all_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(all_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare "none" selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(none_sid, none_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(none_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(none_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(none_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(none_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(none_sid, mult_pt_sid); - 
VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(none_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(none_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(none_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(none_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(none_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(none_sid, none_hyper_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(none_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(none_sid, scalar_none_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare single point selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(single_pt_sid, single_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(single_pt_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(single_pt_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(single_pt_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(single_pt_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(single_pt_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, single_hyper_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, scalar_all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare 
against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(single_pt_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare multiple point selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(mult_pt_sid, mult_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(mult_pt_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(mult_pt_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(mult_pt_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(mult_pt_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(mult_pt_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(mult_pt_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare single "normal" hyperslab selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(single_hyper_sid, single_hyper_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(single_hyper_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(single_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(single_hyper_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(single_hyper_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(single_hyper_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(single_hyper_sid, mult_pt_sid); - 
VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - -#ifdef NOT_YET - /* In theory, these two selections are the same shape, but the - * H5Sselect_shape_same() routine is just not this sophisticated yet and it - * would take too much effort to make this work. The worst case is that the - * non-optimized chunk mapping routines will be invoked instead of the more - * optimized routines, so this only hurts performance, not correctness - */ - /* Construct point selection which matches "plain" hyperslab selection */ - /* Create dataspace for point selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of points for point selection */ - for (u = 1; u < (SPACE9_DIM1 - 1); u++) { - for (v = 1; v < (SPACE9_DIM2 - 1); v++) { - coord2[v - 1][0] = u; - coord2[v - 1][1] = v; - } /* end for */ - - ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, (SPACE9_DIM2 - 2), coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); -#endif /* NOT_YET */ - - /* Construct hyperslab selection which matches "plain" hyperslab selection */ - /* Create dataspace for hyperslab selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Un-select entire extent */ - ret = H5Sselect_none(tmp_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Select sequence of rows for hyperslab selection */ - for (u = 1; u < (SPACE9_DIM1 - 1); u++) { - start[0] = u; - start[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = (SPACE9_DIM2 - 2); - ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare single "all" hyperslab selection to all the selections created */ - /* Compare against itself */ - check = 
H5Sselect_shape_same(single_hyper_all_sid, single_hyper_all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(single_hyper_all_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - -#ifdef NOT_YET - /* In theory, these two selections are the same shape, but the - * H5S_select_shape_same() routine is just not this sophisticated yet and it - * would take too much effort to make this work. 
The worst case is that the - * non-optimized chunk mapping routines will be invoked instead of the more - * optimized routines, so this only hurts performance, not correctness - */ - /* Construct point selection which matches "all" hyperslab selection */ - /* Create dataspace for point selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of points for point selection */ - for (u = 0; u < SPACE9_DIM1; u++) { - for (v = 0; v < SPACE9_DIM2; v++) { - coord2[v][0] = u; - coord2[v][1] = v; - } /* end for */ - ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, SPACE9_DIM2, coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); -#endif /* NOT_YET */ - - /* Construct hyperslab selection which matches "all" hyperslab selection */ - /* Create dataspace for hyperslab selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Un-select entire extent */ - ret = H5Sselect_none(tmp_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Select sequence of rows for hyperslab selection */ - for (u = 0; u < SPACE9_DIM2; u++) { - start[0] = u; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = SPACE9_DIM2; - ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_all_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare single "point" hyperslab selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(single_hyper_pt_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(single_hyper_pt_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, single_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_sid); - 
VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, scalar_all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(single_hyper_pt_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare regular, strided hyperslab selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(regular_hyper_sid, regular_hyper_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(regular_hyper_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(regular_hyper_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(regular_hyper_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(regular_hyper_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(regular_hyper_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Construct point selection which matches regular, strided hyperslab selection */ - /* Create dataspace for point selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of points for point selection */ - for (u = 2; u < 11; u += 2) { - for (v = 0; v < 2; v++) { - coord2[v][0] = u; - coord2[v][1] = (v * 2) + 2; - } /* end for */ - 
ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, (size_t)2, (const hsize_t *)coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Construct hyperslab selection which matches regular, strided hyperslab selection */ - /* Create dataspace for hyperslab selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Un-select entire extent */ - ret = H5Sselect_none(tmp_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Select sequence of rows for hyperslab selection */ - for (u = 2; u < 11; u += 2) { - start[0] = u; - start[1] = 3; - stride[0] = 1; - stride[1] = 2; - count[0] = 1; - count[1] = 2; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Construct regular hyperslab selection with an offset which matches regular, strided hyperslab selection - */ - /* Create dataspace for hyperslab selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - /* Select regular, strided hyperslab selection at an offset */ - start[0] = 1; - start[1] = 1; - stride[0] = 2; - stride[1] = 2; - count[0] = 5; - count[1] = 2; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(regular_hyper_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare irregular hyperslab selection to all the selections created */ - /* Compare against itself */ - check = H5Sselect_shape_same(irreg_hyper_sid, irreg_hyper_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(irreg_hyper_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(irreg_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, mult_pt_sid); - VERIFY(check, false, 
"H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Construct hyperslab selection which matches irregular hyperslab selection */ - /* Create dataspace for hyperslab selection */ - tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(tmp_sid, FAIL, "H5Screate_simple"); - - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Select sequence of columns for hyperslab selection */ - for (u = 0; u < 3; u++) { - start[0] = 4; - start[1] = u + 4; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 3; - block[1] = 1; - ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end for */ - - /* Compare against hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(irreg_hyper_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare scalar "all" dataspace with all selections created */ - - /* Compare against itself */ - check = H5Sselect_shape_same(scalar_all_sid, scalar_all_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(scalar_all_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(scalar_all_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(scalar_all_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(scalar_all_sid, none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(scalar_all_sid, single_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(scalar_all_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, 
single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, single_hyper_pt_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, none_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against scalar "none" hyperslab selection */ - check = H5Sselect_shape_same(scalar_all_sid, scalar_none_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare scalar "none" dataspace with all selections created */ - - /* Compare against itself */ - check = H5Sselect_shape_same(scalar_none_sid, scalar_none_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against copy of itself */ - tmp_sid = H5Scopy(scalar_none_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); - - check = H5Sselect_shape_same(scalar_none_sid, tmp_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Compare against "all" selection */ - check = H5Sselect_shape_same(scalar_none_sid, all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "none" selection */ - check = H5Sselect_shape_same(scalar_none_sid, none_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against single point selection */ - check = H5Sselect_shape_same(scalar_none_sid, single_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against multiple point selection */ - check = H5Sselect_shape_same(scalar_none_sid, mult_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "plain" single hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, single_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "all" single hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, single_hyper_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "single point" single hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, single_hyper_pt_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against regular, strided hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, regular_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against irregular hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, irreg_hyper_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "no" hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, none_hyper_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Compare against scalar "all" hyperslab selection */ - check = H5Sselect_shape_same(scalar_none_sid, scalar_all_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(all_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = 
H5Sclose(none_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(mult_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_all_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(regular_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(irreg_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(none_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(scalar_all_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(scalar_none_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_shape_same() */ - -/**************************************************************** -** -** test_shape_same_dr__smoke_check_1(): -** -** Create a square, 2-D dataspace (10 X 10), and select -** all of it. -** -** Similarly, create nine, 3-D dataspaces (10 X 10 X 10), -** and select (10 X 10 X 1) hyperslabs in each, three with -** the slab parallel to the xy plane, three parallel to the -** xz plane, and three parallel to the yz plane. -** -** Assuming that z is the fastest changing dimension, -** H5Sselect_shape_same() should return true when comparing -** the full 2-D space against any hyperslab parallel to the -** yz plane in the 3-D space, and false when comparing the -** full 2-D space against the other two hyperslabs. -** -** Also create two additional 3-D dataspaces (10 X 10 X 10), -** and select a (10 X 10 X 2) hyperslab parallel to the yz -** axis in one of them, and two parallel (10 X 10 X 1) hyper -** slabs parallel to the yz axis in the other. -** H5Sselect_shape_same() should return false when comparing -** each to the 2-D selection. 
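-**
-**  For reference, a minimal sketch of one yz-plane slab that should
-**  compare as "shape same" with the full 10 x 10 square (cube_sid is a
-**  hypothetical 10 x 10 x 10 dataspace, z fastest changing):
-**
-**      hsize_t start[3] = {0, 0, 0};
-**      hsize_t count[3] = {1, 1, 1};
-**      hsize_t block[3] = {1, 10, 10};   /* 1 (x) by 10 (y) by 10 (z) */
-**
-**      H5Sselect_hyperslab(cube_sid, H5S_SELECT_SET, start, NULL, count, block);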
-** -****************************************************************/ -static void -test_shape_same_dr__smoke_check_1(void) -{ - hid_t small_square_sid; - hid_t small_cube_xy_slice_0_sid; - hid_t small_cube_xy_slice_1_sid; - hid_t small_cube_xy_slice_2_sid; - hid_t small_cube_xz_slice_0_sid; - hid_t small_cube_xz_slice_1_sid; - hid_t small_cube_xz_slice_2_sid; - hid_t small_cube_yz_slice_0_sid; - hid_t small_cube_yz_slice_1_sid; - hid_t small_cube_yz_slice_2_sid; - hid_t small_cube_yz_slice_3_sid; - hid_t small_cube_yz_slice_4_sid; - hsize_t small_cube_dims[] = {10, 10, 10}; - hsize_t start[3]; - hsize_t stride[3]; - hsize_t count[3]; - hsize_t block[3]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MESSAGE(7, (" Smoke check 1: Slices through a cube.\n")); - - /* Create the 10 x 10 dataspace */ - small_square_sid = H5Screate_simple(2, small_cube_dims, NULL); - CHECK(small_square_sid, FAIL, "H5Screate_simple"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */ - small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - start[2] = 0; /* z */ - - /* stride is a bit silly here, since we are only selecting a single */ - /* contiguous plane, but include it anyway, with values large enough */ - /* to ensure that we will only get the single block selected. */ - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 10; /* x */ - block[1] = 10; /* y */ - block[2] = 1; /* z */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[2] = 5; - ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[2] = 9; - ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */ - small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - start[2] = 0; /* z */ - - /* stride is a bit silly here, since we are only selecting a single */ - /* contiguous chunk, but include it anyway, with values large enough */ - /* to ensure that we will only get the single chunk. 
*/ - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 10; /* x */ - block[1] = 1; /* y */ - block[2] = 10; /* z */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[1] = 4; - ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[1] = 9; - ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */ - small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_4_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_4_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - start[2] = 0; /* z */ - - /* stride is a bit silly here, since we are only selecting a single */ - /* contiguous chunk, but include it anyway, with values large enough */ - /* to ensure that we will only get the single chunk. */ - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 1; /* x */ - block[1] = 10; /* y */ - block[2] = 10; /* z */ - - ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 4; - ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 9; - ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 4; - block[0] = 2; - ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 3; - block[0] = 1; - ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 6; - ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* setup is done -- run the tests: */ - - /* Compare against "xy" selection */ - check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "xz" selection */ - check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid); - 
VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "yz" selection */ - check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_3_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_4_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(small_square_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_3_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_4_sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* test_shape_same_dr__smoke_check_1() */ - -/**************************************************************** -** -** test_shape_same_dr__smoke_check_2(): -** -** Create a square, 2-D dataspace (10 X 10), and select -** a "checker board" hyperslab as follows: -** -** * * - - * * - - * * -** * * - - * * - - * * -** - - * * - - * * - - -** - - * * - - * * - - -** * * - - * * - - * * -** * * - - * * - - * * -** - - * * - - * * - - -** - - * * - - * * - - -** * * - - * * - - * * -** * * - - * * - - * * -** -** where asterisks indicate selected elements, and dashes -** indicate unselected elements. -** -** Similarly, create nine, 3-D dataspaces (10 X 10 X 10), -** and select similar (10 X 10 X 1) checker board hyper -** slabs in each, three with the slab parallel to the xy -** plane, three parallel to the xz plane, and three parallel -** to the yz plane. -** -** Assuming that z is the fastest changing dimension, -** H5Sselect_shape_same() should return true when comparing -** the 2-D space checker board selection against a checker -** board hyperslab parallel to the yz plane in the 3-D -** space, and false when comparing the 2-D checkerboard -** selection against two hyperslabs parallel to the xy -** or xz planes. -** -** Also create an additional 3-D dataspaces (10 X 10 X 10), -** and select a checker board parallel with the yz axis, -** save with some squares being on different planes. -** H5Sselect_shape_same() should return false when -** comparing this selection to the 2-D selection. 
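-**
-**  A minimal sketch of how the 2-D checker board above is built, as in
-**  the test body below (square_sid is a hypothetical 10 x 10 dataspace;
-**  two 4-strided patterns of 2 x 2 blocks are OR'd together):
-**
-**      hsize_t start[2]  = {0, 0};
-**      hsize_t stride[2] = {4, 4};
-**      hsize_t count[2]  = {3, 3};
-**      hsize_t block[2]  = {2, 2};
-**
-**      H5Sselect_hyperslab(square_sid, H5S_SELECT_SET, start, stride, count, block);
-**
-**      start[0] = start[1] = 2;
-**      count[0] = count[1] = 2;
-**      H5Sselect_hyperslab(square_sid, H5S_SELECT_OR, start, stride, count, block);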
-** -****************************************************************/ -static void -test_shape_same_dr__smoke_check_2(void) -{ - hid_t small_square_sid; - hid_t small_cube_xy_slice_0_sid; - hid_t small_cube_xy_slice_1_sid; - hid_t small_cube_xy_slice_2_sid; - hid_t small_cube_xz_slice_0_sid; - hid_t small_cube_xz_slice_1_sid; - hid_t small_cube_xz_slice_2_sid; - hid_t small_cube_yz_slice_0_sid; - hid_t small_cube_yz_slice_1_sid; - hid_t small_cube_yz_slice_2_sid; - hid_t small_cube_yz_slice_3_sid; - hsize_t small_cube_dims[] = {10, 10, 10}; - hsize_t start[3]; - hsize_t stride[3]; - hsize_t count[3]; - hsize_t block[3]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MESSAGE(7, (" Smoke check 2: Checker board slices through a cube.\n")); - - /* Create the 10 x 10 dataspace */ - small_square_sid = H5Screate_simple(2, small_cube_dims, NULL); - CHECK(small_square_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - - stride[0] = 4; /* x */ - stride[1] = 4; /* y */ - - count[0] = 3; /* x */ - count[1] = 3; /* y */ - - block[0] = 2; /* x */ - block[1] = 2; /* y */ - ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 2; /* x */ - start[1] = 2; /* y */ - - stride[0] = 4; /* x */ - stride[1] = 4; /* y */ - - count[0] = 2; /* x */ - count[1] = 2; /* y */ - - block[0] = 2; /* x */ - block[1] = 2; /* y */ - ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */ - small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - start[2] = 0; /* z */ - - stride[0] = 4; /* x */ - stride[1] = 4; /* y */ - stride[2] = 20; /* z -- large enough that there will only be one slice */ - - count[0] = 3; /* x */ - count[1] = 3; /* y */ - count[2] = 1; /* z */ - - block[0] = 2; /* x */ - block[1] = 2; /* y */ - block[2] = 1; /* z */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[2] = 3; - ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[2] = 9; - ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 2; /* x */ - start[1] = 2; /* y */ - start[2] = 0; /* z */ - - stride[0] = 4; /* x */ - stride[1] = 4; /* y */ - stride[2] = 20; /* z -- large enough that there will only be one slice */ - - count[0] = 2; /* x */ - count[1] = 2; /* y */ - count[2] = 1; /* z */ - - block[0] = 2; /* x */ - block[1] = 2; /* y */ - block[2] = 1; /* z */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[2] = 3; - ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, 
block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[2] = 9; - ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */ - small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - start[2] = 0; /* z */ - - stride[0] = 4; /* x */ - stride[1] = 20; /* y -- large enough that there will only be one slice */ - stride[2] = 4; /* z */ - - count[0] = 3; /* x */ - count[1] = 1; /* y */ - count[2] = 3; /* z */ - - block[0] = 2; /* x */ - block[1] = 1; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[1] = 5; - ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[1] = 9; - ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 2; /* x */ - start[1] = 0; /* y */ - start[2] = 2; /* z */ - - stride[0] = 4; /* x */ - stride[1] = 20; /* y -- large enough that there will only be one slice */ - stride[2] = 4; /* z */ - - count[0] = 2; /* x */ - count[1] = 1; /* y */ - count[2] = 2; /* z */ - - block[0] = 2; /* x */ - block[1] = 1; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[1] = 5; - ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[1] = 9; - ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */ - small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple"); - - start[0] = 0; /* x */ - start[1] = 0; /* y */ - start[2] = 0; /* z */ - - stride[0] = 20; /* x -- large enough that there will only be one slice */ - stride[1] = 4; /* y */ - stride[2] = 4; /* z */ - - count[0] = 1; /* x */ - count[1] = 3; /* y */ - count[2] = 3; /* z */ - - block[0] = 1; /* x */ - block[1] = 2; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 8; - ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, 
H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 9; - ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 3; - ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 0; /* x */ - start[1] = 2; /* y */ - start[2] = 2; /* z */ - - stride[0] = 20; /* x -- large enough that there will only be one slice */ - stride[1] = 4; /* y */ - stride[2] = 4; /* z */ - - count[0] = 1; /* x */ - count[1] = 2; /* y */ - count[2] = 2; /* z */ - - block[0] = 1; /* x */ - block[1] = 2; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 8; - ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 9; - ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 4; - /* This test gets the right answer, but it fails the shape same - * test in an unexpected point. Bring this up with Quincey, as - * the oddness looks like it is not related to my code. - * -- JRM - */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* setup is done -- run the tests: */ - - /* Compare against "xy" selection */ - check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "xz" selection */ - check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "yz" selection */ - check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_3_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(small_square_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_2_sid); 
- CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_3_sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* test_shape_same_dr__smoke_check_2() */ - -/**************************************************************** -** -** test_shape_same_dr__smoke_check_3(): -** -** Create a square, 2-D dataspace (10 X 10), and select an -** irregular hyperslab as follows: -** -** y -** 9 - - - - - - - - - - -** 8 - - - - - - - - - - -** 7 - - - * * * * - - - -** 6 - - * * * * * - - - -** 5 - - * * - - - - - - -** 4 - - * * - * * - - - -** 3 - - * * - * * - - - -** 2 - - - - - - - - - - -** 1 - - - - - - - - - - -** 0 - - - - - - - - - - -** 0 1 2 3 4 5 6 7 8 9 x -** -** where asterisks indicate selected elements, and dashes -** indicate unselected elements. -** -** Similarly, create nine, 3-D dataspaces (10 X 10 X 10), -** and select similar irregular hyperslabs in each, three -** with the slab parallel to the xy plane, three parallel -** to the xz plane, and three parallel to the yz plane. -** Further, translate the irregular slab in 2/3rds of the -** cases. -** -** Assuming that z is the fastest changing dimension, -** H5Sselect_shape_same() should return true when -** comparing the 2-D irregular hyperslab selection -** against the irregular hyperslab selections parallel -** to the yz plane in the 3-D space, and false when -** comparing it against the irregular hyperslabs -** selections parallel to the xy or xz planes. -** -****************************************************************/ -static void -test_shape_same_dr__smoke_check_3(void) -{ - hid_t small_square_sid; - hid_t small_cube_xy_slice_0_sid; - hid_t small_cube_xy_slice_1_sid; - hid_t small_cube_xy_slice_2_sid; - hid_t small_cube_xz_slice_0_sid; - hid_t small_cube_xz_slice_1_sid; - hid_t small_cube_xz_slice_2_sid; - hid_t small_cube_yz_slice_0_sid; - hid_t small_cube_yz_slice_1_sid; - hid_t small_cube_yz_slice_2_sid; - hsize_t small_cube_dims[] = {10, 10, 10}; - hsize_t start[3]; - hsize_t stride[3]; - hsize_t count[3]; - hsize_t block[3]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MESSAGE(7, (" Smoke check 3: Offset subsets of slices through a cube.\n")); - - /* Create the 10 x 10 dataspace */ - small_square_sid = H5Screate_simple(2, small_cube_dims, NULL); - CHECK(small_square_sid, FAIL, "H5Screate_simple"); - - start[0] = 2; /* x */ - start[1] = 3; /* y */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - - block[0] = 2; /* x */ - block[1] = 4; /* y */ - ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 3; /* x */ - start[1] = 6; /* y */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - - block[0] = 4; /* x */ - block[1] = 2; /* y */ - ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 5; /* x */ - start[1] = 3; /* y */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - - block[0] = 2; /* x */ - block[1] = 2; /* y */ - ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, 
stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */ - small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 2; /* x */ - start[1] = 3; /* y */ - start[2] = 5; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 2; /* x */ - block[1] = 4; /* y */ - block[2] = 1; /* z */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[0] -= 1; /* x */ - start[1] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[1] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 3; /* x */ - start[1] = 6; /* y */ - start[2] = 5; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 4; /* x */ - block[1] = 2; /* y */ - block[2] = 1; /* z */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[0] -= 1; /* x */ - start[1] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[1] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 5; /* x */ - start[1] = 3; /* y */ - start[2] = 5; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 2; /* x */ - block[1] = 2; /* y */ - block[2] = 1; /* z */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[0] -= 1; /* x */ - start[1] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[1] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */ - small_cube_xz_slice_0_sid = H5Screate_simple(3, 
small_cube_dims, NULL); - CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 2; /* x */ - start[1] = 5; /* y */ - start[2] = 3; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 2; /* x */ - block[1] = 1; /* y */ - block[2] = 4; /* z */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[0] -= 1; /* x */ - start[2] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[2] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 3; /* x */ - start[1] = 5; /* y */ - start[2] = 6; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 4; /* x */ - block[1] = 1; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[0] -= 1; /* x */ - start[2] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[2] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 5; /* x */ - start[1] = 5; /* y */ - start[2] = 3; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 2; /* x */ - block[1] = 1; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[0] -= 1; /* x */ - start[2] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[2] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* QAK: Start here. 
- */ - /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */ - small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple"); - - small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL); - CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple"); - - start[0] = 8; /* x */ - start[1] = 2; /* y */ - start[2] = 3; /* z */ - - stride[0] = 20; /* x -- large enough that there will only be one slice */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 1; /* x */ - block[1] = 2; /* y */ - block[2] = 4; /* z */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[1] -= 1; /* x */ - start[2] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[2] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 8; /* x */ - start[1] = 3; /* y */ - start[2] = 6; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 1; /* x */ - block[1] = 4; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[1] -= 1; /* x */ - start[2] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[2] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 8; /* x */ - start[1] = 5; /* y */ - start[2] = 3; /* z */ - - stride[0] = 20; /* x */ - stride[1] = 20; /* y */ - stride[2] = 20; /* z */ - - count[0] = 1; /* x */ - count[1] = 1; /* y */ - count[2] = 1; /* z */ - - block[0] = 1; /* x */ - block[1] = 2; /* y */ - block[2] = 2; /* z */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the starting point to the origin */ - start[1] -= 1; /* x */ - start[2] -= 2; /* y */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* move the irregular selection to the upper right hand corner */ - start[0] += 5; /* x */ - start[2] += 5; /* y */ - ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* setup is done -- run the tests: */ - - /* Compare against "xy" selection */ - check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, 
small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "xz" selection */ - check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Compare against "yz" selection */ - check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(small_square_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xy_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_xz_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_cube_yz_slice_2_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_shape_same_dr__smoke_check_3() */ - -/**************************************************************** -** -** test_shape_same_dr__smoke_check_4(): -** -** Create a square, 2-D dataspace (10 X 10), and select -** the entire space. -** -** Similarly, create 3-D and 4-D dataspaces: -** -** (1 X 10 X 10) -** (10 X 1 X 10) -** (10 X 10 X 1) -** (10 X 10 X 10) -** -** (1 X 1 X 10 X 10) -** (1 X 10 X 1 X 10) -** (1 X 10 X 10 X 1) -** (10 X 1 X 1 X 10) -** (10 X 1 X 10 X 1) -** (10 X 10 X 1 X 1) -** (10 X 1 X 10 X 10) -** -** And select these entire spaces as well. -** -** Compare the 2-D space against all the other spaces -** with H5Sselect_shape_same(). The (1 X 10 X 10) & -** (1 X 1 X 10 X 10) should return true. All others -** should return false. 
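/*
 * [Editor's sketch -- not part of the original patch.]  The comparison the
 * comment above describes, in isolation: two dataspaces of different rank,
 * both left with their default "all" selection, compare as shape-same when
 * the extra dimensions have extent 1 on the slowest-changing side (10 x 10
 * versus 1 x 10 x 10 here).  Assumes the HDF5 public API already included
 * by this test file; the function name is hypothetical.
 */
static htri_t
sketch_full_square_vs_flat_cube(void)
{
    hsize_t dims_2d[2] = {10, 10};
    hsize_t dims_3d[3] = {1, 10, 10};
    hid_t   sid_2d     = H5Screate_simple(2, dims_2d, NULL);
    hid_t   sid_3d     = H5Screate_simple(3, dims_3d, NULL);
    htri_t  same       = -1;

    /* Newly created simple dataspaces carry an "all" selection, so no
     * explicit H5Sselect_all() call is needed before the comparison.
     */
    if (sid_2d >= 0 && sid_3d >= 0)
        same = H5Sselect_shape_same(sid_3d, sid_2d); /* expected: true */

    if (sid_2d >= 0)
        H5Sclose(sid_2d);
    if (sid_3d >= 0)
        H5Sclose(sid_3d);

    return same;
}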
-** -****************************************************************/ -static void -test_shape_same_dr__smoke_check_4(void) -{ - hid_t square_sid; - hid_t three_d_space_0_sid; - hid_t three_d_space_1_sid; - hid_t three_d_space_2_sid; - hid_t three_d_space_3_sid; - hid_t four_d_space_0_sid; - hid_t four_d_space_1_sid; - hid_t four_d_space_2_sid; - hid_t four_d_space_3_sid; - hid_t four_d_space_4_sid; - hid_t four_d_space_5_sid; - hid_t four_d_space_6_sid; - hsize_t dims[] = {10, 10, 10, 10}; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MESSAGE(7, (" Smoke check 4: Spaces of different dimension but same size.\n")); - - /* Create the 10 x 10 dataspace */ - square_sid = H5Screate_simple(2, dims, NULL); - CHECK(square_sid, FAIL, "H5Screate_simple"); - - /* create (1 X 10 X 10) dataspace */ - dims[0] = 1; - dims[1] = 10; - dims[2] = 10; - three_d_space_0_sid = H5Screate_simple(3, dims, NULL); - CHECK(three_d_space_0_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 1 X 10) dataspace */ - dims[0] = 10; - dims[1] = 1; - dims[2] = 10; - three_d_space_1_sid = H5Screate_simple(3, dims, NULL); - CHECK(three_d_space_1_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 10 X 1) dataspace */ - dims[0] = 10; - dims[1] = 10; - dims[2] = 1; - three_d_space_2_sid = H5Screate_simple(3, dims, NULL); - CHECK(three_d_space_2_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 10 X 10) dataspace */ - dims[0] = 10; - dims[1] = 10; - dims[2] = 10; - three_d_space_3_sid = H5Screate_simple(3, dims, NULL); - CHECK(three_d_space_3_sid, FAIL, "H5Screate_simple"); - - /* create (1 X 1 X 10 X 10) dataspace */ - dims[0] = 1; - dims[1] = 1; - dims[2] = 10; - dims[3] = 10; - four_d_space_0_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_0_sid, FAIL, "H5Screate_simple"); - - /* create (1 X 10 X 1 X 10) dataspace */ - dims[0] = 1; - dims[1] = 10; - dims[2] = 1; - dims[3] = 10; - four_d_space_1_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_1_sid, FAIL, "H5Screate_simple"); - - /* create (1 X 10 X 10 X 1) dataspace */ - dims[0] = 1; - dims[1] = 10; - dims[2] = 10; - dims[3] = 1; - four_d_space_2_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_2_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 1 X 1 X 10) dataspace */ - dims[0] = 10; - dims[1] = 1; - dims[2] = 1; - dims[3] = 10; - four_d_space_3_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_3_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 1 X 10 X 1) dataspace */ - dims[0] = 10; - dims[1] = 1; - dims[2] = 10; - dims[3] = 1; - four_d_space_4_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_4_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 10 X 1 X 1) dataspace */ - dims[0] = 10; - dims[1] = 10; - dims[2] = 1; - dims[3] = 1; - four_d_space_5_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_5_sid, FAIL, "H5Screate_simple"); - - /* create (10 X 1 X 10 X 10) dataspace */ - dims[0] = 10; - dims[1] = 1; - dims[2] = 10; - dims[3] = 10; - four_d_space_6_sid = H5Screate_simple(4, dims, NULL); - CHECK(four_d_space_6_sid, FAIL, "H5Screate_simple"); - - /* setup is done -- run the tests: */ - - check = H5Sselect_shape_same(three_d_space_0_sid, square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(three_d_space_1_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(three_d_space_2_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = 
H5Sselect_shape_same(three_d_space_3_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_0_sid, square_sid); - VERIFY(check, true, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_1_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_2_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_3_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_4_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_5_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - check = H5Sselect_shape_same(four_d_space_6_sid, square_sid); - VERIFY(check, false, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(square_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(three_d_space_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(three_d_space_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(three_d_space_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(three_d_space_3_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_1_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_2_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_3_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_4_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_5_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(four_d_space_6_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_shape_same_dr__smoke_check_4() */ - -/**************************************************************** -** -** test_shape_same_dr__full_space_vs_slice(): Tests selection -** of a full n-cube dataspace vs an n-dimensional slice of -** of an m-cube (m > n) in a call to H5Sselect_shape_same(). -** Note that this test does not require the n-cube and the -** n-dimensional slice to have the same rank (although -** H5Sselect_shape_same() should always return false if -** they don't). -** -** Per Quincey's suggestion, only test up to 5 dimensional -** spaces. 
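/*
 * [Editor's sketch -- not part of the original patch.]  A reduced instance
 * of the full-space-versus-slice case described above: a fully selected
 * 10 x 10 square has the same shape as a 1 x 10 x 10 hyperslab of a
 * 10 x 10 x 10 cube, because the slice spans the two fastest-changing
 * dimensions.  Assumes the HDF5 public API already included by this test
 * file; the function name is hypothetical.
 */
static htri_t
sketch_full_square_vs_cube_slice(void)
{
    hsize_t square_dims[2] = {10, 10};
    hsize_t cube_dims[3]   = {10, 10, 10};
    hsize_t start[3]       = {4, 0, 0};   /* pin x at 4 */
    hsize_t count[3]       = {1, 10, 10}; /* stride/block default to 1 */
    hid_t   square_sid     = H5Screate_simple(2, square_dims, NULL);
    hid_t   cube_sid       = H5Screate_simple(3, cube_dims, NULL);
    htri_t  same           = -1;

    if (square_sid >= 0 && cube_sid >= 0 &&
        H5Sselect_hyperslab(cube_sid, H5S_SELECT_SET, start, NULL, count, NULL) >= 0)
        same = H5Sselect_shape_same(cube_sid, square_sid); /* expected: true */

    if (square_sid >= 0)
        H5Sclose(square_sid);
    if (cube_sid >= 0)
        H5Sclose(cube_sid);

    return same;
}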
-** -****************************************************************/ -static void -test_shape_same_dr__full_space_vs_slice(int test_num, int small_rank, int large_rank, int offset, - hsize_t edge_size, bool dim_selected[], bool expected_result) -{ - char test_desc_0[128]; - char test_desc_1[256]; - int i; - hid_t n_cube_0_sid; /* the fully selected hyper cube */ - hid_t n_cube_1_sid; /* the hyper cube in which a slice is selected */ - hsize_t dims[SS_DR_MAX_RANK]; - hsize_t start[SS_DR_MAX_RANK]; - hsize_t *start_ptr; - hsize_t stride[SS_DR_MAX_RANK]; - hsize_t *stride_ptr; - hsize_t count[SS_DR_MAX_RANK]; - hsize_t *count_ptr; - hsize_t block[SS_DR_MAX_RANK]; - hsize_t *block_ptr; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - assert(0 < small_rank); - assert(small_rank <= large_rank); - assert(large_rank <= SS_DR_MAX_RANK); - assert(0 <= offset); - assert(offset < large_rank); - assert(edge_size > 0); - assert(edge_size <= 1000); - - snprintf(test_desc_0, sizeof(test_desc_0), "\tn-cube slice through m-cube (n <= m) test %d.\n", test_num); - MESSAGE(7, ("%s", test_desc_0)); - - /* This statement must be updated if SS_DR_MAX_RANK is changed */ - snprintf(test_desc_1, sizeof(test_desc_1), "\t\tranks: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d.\n", - small_rank, large_rank, offset, (int)dim_selected[0], (int)dim_selected[1], (int)dim_selected[2], - (int)dim_selected[3], (int)dim_selected[4]); - MESSAGE(7, ("%s", test_desc_1)); - - /* copy the edge size into the dims array */ - for (i = 0; i < SS_DR_MAX_RANK; i++) - dims[i] = edge_size; - - /* Create the small n-cube */ - n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL); - CHECK(n_cube_0_sid, FAIL, "H5Screate_simple"); - - /* Create the large n-cube */ - n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL); - CHECK(n_cube_1_sid, FAIL, "H5Screate_simple"); - - /* set up start, stride, count, and block for the hyperslab selection */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - stride[i] = 2 * edge_size; /* a bit silly in this case */ - count[i] = 1; - if (dim_selected[i]) { - start[i] = 0; - block[i] = edge_size; - } - else { - start[i] = (hsize_t)offset; - block[i] = 1; - } - } - - /* since large rank may be less than SS_DR_MAX_RANK, we may not - * use the entire start, stride, count, and block arrays. This - * is a problem, since it is inconvenient to set up the dim_selected - * array to reflect the large rank, and thus if large_rank < - * SS_DR_MAX_RANK, we need to hide the lower index entries - * from H5Sselect_hyperslab(). - * - * Do this by setting up pointers to the first valid entry in start, - * stride, count, and block below, and pass these pointers in - * to H5Sselect_hyperslab() instead of the array base addresses. 
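/*
 * [Editor's sketch -- not part of the original patch.]  The array-offset
 * trick the comment above describes, factored into a helper: the arrays are
 * dimensioned for SS_DR_MAX_RANK, but only the trailing large_rank entries
 * are meaningful, so a pointer to the first valid entry is what gets handed
 * to H5Sselect_hyperslab().  SS_DR_MAX_RANK is the constant already defined
 * for these tests; the helper name is hypothetical.
 */
static herr_t
sketch_select_trailing_dims(hid_t large_sid, int large_rank, const hsize_t start[SS_DR_MAX_RANK],
                            const hsize_t stride[SS_DR_MAX_RANK], const hsize_t count[SS_DR_MAX_RANK],
                            const hsize_t block[SS_DR_MAX_RANK])
{
    int first = SS_DR_MAX_RANK - large_rank; /* index of the first valid entry */

    if (first < 0)
        return FAIL;

    /* Only the last large_rank entries of each array are passed on */
    return H5Sselect_hyperslab(large_sid, H5S_SELECT_SET, &start[first], &stride[first], &count[first],
                               &block[first]);
}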
- */ - - i = SS_DR_MAX_RANK - large_rank; - assert(i >= 0); - - start_ptr = &(start[i]); - stride_ptr = &(stride[i]); - count_ptr = &(count[i]); - block_ptr = &(block[i]); - - /* select the hyperslab */ - ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET, start_ptr, stride_ptr, count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* setup is done -- run the test: */ - check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid); - VERIFY(check, expected_result, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(n_cube_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(n_cube_1_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_shape_same_dr__full_space_vs_slice() */ - -/**************************************************************** -** -** test_shape_same_dr__run_full_space_vs_slice_tests(): -** -** Run the test_shape_same_dr__full_space_vs_slice() test -** over a variety of ranks and offsets. -** -** At present, we test H5Sselect_shape_same() with -** fully selected 1, 2, 3, and 4 cubes as one parameter, and -** 1, 2, 3, and 4 dimensional slices through a n-cube of rank -** no more than 5 (and at least the rank of the slice). -** We stop at rank 5, as Quincey suggested that it would be -** sufficient. -** -** All the n-cubes will have lengths of the same size, so -** H5Sselect_shape_same() should return true iff: -** -** 1) the rank for the fully selected n cube equals the -** number of dimensions selected in the slice through the -** m-cube (m >= n). -** -** 2) The dimensions selected in the slice through the m-cube -** are the dimensions with the most quickly changing -** indices. -** -****************************************************************/ -static void -test_shape_same_dr__run_full_space_vs_slice_tests(void) -{ - bool dim_selected[5]; - bool expected_result; - int i, j; - int v, w, x, y, z; - int test_num = 0; - int small_rank; - int large_rank; - hsize_t edge_size = 10; - - for (large_rank = 1; large_rank <= 5; large_rank++) { - for (small_rank = 1; small_rank <= large_rank; small_rank++) { - v = 0; - do { - if (v == 0) - dim_selected[0] = false; - else - dim_selected[0] = true; - - w = 0; - do { - if (w == 0) - dim_selected[1] = false; - else - dim_selected[1] = true; - - x = 0; - do { - if (x == 0) - dim_selected[2] = false; - else - dim_selected[2] = true; - - y = 0; - do { - if (y == 0) - dim_selected[3] = false; - else - dim_selected[3] = true; - - z = 0; - do { - if (z == 0) - dim_selected[4] = false; - else - dim_selected[4] = true; - - /* compute the expected result: */ - i = 0; - j = 4; - expected_result = true; - while ((i < small_rank) && expected_result) { - if (!dim_selected[j]) - expected_result = false; - i++; - j--; - } - - while ((i < large_rank) && expected_result) { - if (dim_selected[j]) - expected_result = false; - i++; - j--; - } - - /* everything is set up -- run the tests */ - - test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, 0, - edge_size, dim_selected, - expected_result); - - test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, - large_rank / 2, edge_size, - dim_selected, expected_result); - - test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, - large_rank - 1, edge_size, - dim_selected, expected_result); - - z++; - } while ((z < 2) && (large_rank >= 1)); - - y++; - } while ((y < 2) && (large_rank >= 2)); - - x++; - } while ((x < 2) && (large_rank >= 3)); - - w++; - } while ((w < 2) && (large_rank >= 4)); - - v++; - } while ((v < 
2) && (large_rank >= 5)); - } /* end for */ - } /* end for */ -} /* test_shape_same_dr__run_full_space_vs_slice_tests() */ - -/**************************************************************** -** -** test_shape_same_dr__checkerboard(): Tests selection of a -** "checker board" subset of a full n-cube dataspace vs -** a "checker board" n-dimensional slice of an m-cube (m > n). -** in a call to H5Sselect_shape_same(). -** -** Note that this test does not require the n-cube and the -** n-dimensional slice to have the same rank (although -** H5Sselect_shape_same() should always return false if -** they don't). -** -** Per Quincey's suggestion, only test up to 5 dimensional -** spaces. -** -****************************************************************/ -static void -test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, int offset, hsize_t edge_size, - hsize_t checker_size, bool dim_selected[], bool expected_result) -{ - char test_desc_0[128]; - char test_desc_1[256]; - int i; - int dims_selected = 0; - hid_t n_cube_0_sid; /* the checker board selected - * hyper cube - */ - hid_t n_cube_1_sid; /* the hyper cube in which a - * checkerboard slice is selected - */ - hsize_t dims[SS_DR_MAX_RANK]; - hsize_t base_start[2]; - hsize_t start[SS_DR_MAX_RANK]; - hsize_t *start_ptr; - hsize_t base_stride[2]; - hsize_t stride[SS_DR_MAX_RANK]; - hsize_t *stride_ptr; - hsize_t base_count[2]; - hsize_t count[SS_DR_MAX_RANK]; - hsize_t *count_ptr; - hsize_t base_block[2]; - hsize_t block[SS_DR_MAX_RANK]; - hsize_t *block_ptr; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - assert(0 < small_rank); - assert(small_rank <= large_rank); - assert(large_rank <= SS_DR_MAX_RANK); - assert(0 < checker_size); - assert(checker_size <= edge_size); - assert(edge_size <= 1000); - assert(0 <= offset); - assert(offset < (int)edge_size); - - for (i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++) - if (dim_selected[i] == true) - dims_selected++; - - assert(dims_selected >= 0); - assert(dims_selected <= large_rank); - - snprintf(test_desc_0, sizeof(test_desc_0), - "\tcheckerboard n-cube slice through m-cube (n <= m) test %d.\n", test_num); - MESSAGE(7, ("%s", test_desc_0)); - - /* This statement must be updated if SS_DR_MAX_RANK is changed */ - snprintf(test_desc_1, sizeof(test_desc_1), - "\tranks: %d/%d edge/chkr size: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d:%d.\n", small_rank, - large_rank, (int)edge_size, (int)checker_size, offset, (int)dim_selected[0], - (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4], - dims_selected); - MESSAGE(7, ("%s", test_desc_1)); - - /* copy the edge size into the dims array */ - for (i = 0; i < SS_DR_MAX_RANK; i++) - dims[i] = edge_size; - - /* Create the small n-cube */ - n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL); - CHECK(n_cube_0_sid, FAIL, "H5Screate_simple"); - - /* Select a "checkerboard" pattern in the small n-cube. - * - * In the 1-D case, the "checkerboard" would look like this: - * - * * * - - * * - - * * - * - * and in the 2-D case, it would look like this: - * - * * * - - * * - - * * - * * * - - * * - - * * - * - - * * - - * * - - - * - - * * - - * * - - - * * * - - * * - - * * - * * * - - * * - - * * - * - - * * - - * * - - - * - - * * - - * * - - - * * * - - * * - - * * - * * * - - * * - - * * - * - * In both cases, asterisks indicate selected elements, - * and dashes indicate unselected elements. 
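/*
 * [Editor's sketch -- not part of the original patch.]  The 2-D checkerboard
 * drawn above cannot be expressed as a single hyperslab, so it is built from
 * two calls: the "even" checkers with H5S_SELECT_SET and the "odd" checkers
 * OR-ed in afterwards.  Edge size 10 and checker size 2 match the picture;
 * the function name is hypothetical and assumes the HDF5 public API already
 * included by this test file.
 */
static hid_t
sketch_2d_checkerboard(void)
{
    hsize_t dims[2]   = {10, 10};
    hsize_t start[2]  = {0, 0};
    hsize_t stride[2] = {4, 4}; /* 2 * checker_size */
    hsize_t count[2]  = {3, 3}; /* ceil(edge_size / (2 * checker_size)) */
    hsize_t block[2]  = {2, 2}; /* checker_size */
    hid_t   sid       = H5Screate_simple(2, dims, NULL);

    if (sid < 0)
        return H5I_INVALID_HID;

    /* checkers whose corner lies on an even multiple of the checker size */
    if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block) < 0)
        goto error;

    /* checkers offset by one checker size in both dimensions */
    start[0] = start[1] = 2;
    count[0] = count[1] = 2; /* ceil((edge_size - checker_size) / (2 * checker_size)) */
    if (H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block) < 0)
        goto error;

    return sid;

error:
    H5Sclose(sid);
    return H5I_INVALID_HID;
}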
- * - * 3-D and 4-D ascii art is somewhat painful, so I'll - * leave those selections to your imagination. :-) - * - * Note, that since the edge_size and checker_size are - * parameters that are passed in, the selection need - * not look exactly like the selection shown above. - * At present, the function allows checker sizes that - * are not even divisors of the edge size -- thus - * something like the following is also possible: - * - * * * * - - - * * * - - * * * * - - - * * * - - * * * * - - - * * * - - * - - - * * * - - - * - * - - - * * * - - - * - * - - - * * * - - - * - * * * * - - - * * * - - * * * * - - - * * * - - * * * * - - - * * * - - * - - - * * * - - - * - * - * As the above pattern can't be selected in one - * call to H5Sselect_hyperslab(), and since the - * values in the start, stride, count, and block - * arrays will be repeated over all entries in - * the selected space case, and over all selected - * dimensions in the selected hyperslab case, we - * compute these values first and store them in - * in the base_start, base_stride, base_count, - * and base_block arrays. - */ - - base_start[0] = 0; - base_start[1] = checker_size; - - base_stride[0] = 2 * checker_size; - base_stride[1] = 2 * checker_size; - - /* Note that the following computation depends on the C99 - * requirement that integer division discard any fraction - * (truncation towards zero) to function correctly. As we - * now require C99, this shouldn't be a problem, but noting - * it may save us some pain if we are ever obliged to support - * pre-C99 compilers again. - */ - - base_count[0] = edge_size / (checker_size * 2); - if ((edge_size % (checker_size * 2)) > 0) - base_count[0]++; - - base_count[1] = (edge_size - checker_size) / (checker_size * 2); - if (((edge_size - checker_size) % (checker_size * 2)) > 0) - base_count[1]++; - - base_block[0] = checker_size; - base_block[1] = checker_size; - - /* now setup start, stride, count, and block arrays for - * the first call to H5Sselect_hyperslab(). - */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - start[i] = base_start[0]; - stride[i] = base_stride[0]; - count[i] = base_count[0]; - block[i] = base_block[0]; - } /* end for */ - - ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* if small_rank == 1, or if edge_size == checker_size, we - * are done, as either there is no added dimension in which - * to place offset selected "checkers". - * - * Otherwise, set up start, stride, count and block, and - * make the additional selection. - */ - - if ((small_rank > 1) && (checker_size < edge_size)) { - for (i = 0; i < SS_DR_MAX_RANK; i++) { - start[i] = base_start[1]; - stride[i] = base_stride[1]; - count[i] = base_count[1]; - block[i] = base_block[1]; - } /* end for */ - - ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end if */ - - /* Weirdness alert: - * - * Some how, it seems that selections can extend beyond the - * boundaries of the target dataspace -- hence the following - * code to manually clip the selection back to the dataspace - * proper. 
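/*
 * [Editor's sketch -- not part of the original patch.]  The two "round up"
 * computations above, isolated into a helper: C integer division truncates
 * toward zero, so a remainder test adds the final, partial checker whenever
 * the span is not a multiple of 2 * checker_size.  The helper name is
 * hypothetical; base_count[0] corresponds to sketch_checker_count(edge_size,
 * checker_size) and base_count[1] to
 * sketch_checker_count(edge_size - checker_size, checker_size).
 */
static hsize_t
sketch_checker_count(hsize_t span, hsize_t checker_size)
{
    hsize_t period = 2 * checker_size;
    hsize_t n      = span / period; /* whole periods that fit in the span */

    if ((span % period) > 0)
        n++; /* partial checker at the end of the span */

    return n;
}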
- */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - start[i] = 0; - stride[i] = edge_size; - count[i] = 1; - block[i] = edge_size; - } /* end for */ - - ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the large n-cube */ - n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL); - CHECK(n_cube_1_sid, FAIL, "H5Screate_simple"); - - /* Now select the checkerboard selection in the (possibly larger) n-cube. - * - * Since we have already calculated the base start, stride, count, - * and block, reuse the values in setting up start, stride, count, - * and block. - */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - if (dim_selected[i]) { - start[i] = base_start[0]; - stride[i] = base_stride[0]; - count[i] = base_count[0]; - block[i] = base_block[0]; - } /* end if */ - else { - start[i] = (hsize_t)offset; - stride[i] = (hsize_t)(2 * edge_size); - count[i] = 1; - block[i] = 1; - } /* end else */ - } /* end for */ - - /* Since large rank may be less than SS_DR_MAX_RANK, we may not - * use the entire start, stride, count, and block arrays. This - * is a problem, since it is inconvenient to set up the dim_selected - * array to reflect the large rank, and thus if large_rank < - * SS_DR_MAX_RANK, we need to hide the lower index entries - * from H5Sselect_hyperslab(). - * - * Do this by setting up pointers to the first valid entry in start, - * stride, count, and block below, and pass these pointers in - * to H5Sselect_hyperslab() instead of the array base addresses. - */ - - i = SS_DR_MAX_RANK - large_rank; - assert(i >= 0); - - start_ptr = &(start[i]); - stride_ptr = &(stride[i]); - count_ptr = &(count[i]); - block_ptr = &(block[i]); - - /* select the hyperslab */ - ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET, start_ptr, stride_ptr, count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* As before, if the number of dimensions selected is less than or - * equal to 1, or if edge_size == checker_size, we are done, as - * either there is no added dimension in which to place offset selected - * "checkers", or the hyperslab is completely occupied by one - * "checker". - * - * Otherwise, set up start, stride, count and block, and - * make the additional selection. - */ - if ((dims_selected > 1) && (checker_size < edge_size)) { - for (i = 0; i < SS_DR_MAX_RANK; i++) { - if (dim_selected[i]) { - start[i] = base_start[1]; - stride[i] = base_stride[1]; - count[i] = base_count[1]; - block[i] = base_block[1]; - } /* end if */ - else { - start[i] = (hsize_t)offset; - stride[i] = (hsize_t)(2 * edge_size); - count[i] = 1; - block[i] = 1; - } /* end else */ - } /* end for */ - - ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR, start_ptr, stride_ptr, count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end if */ - - /* Weirdness alert: - * - * Again, it seems that selections can extend beyond the - * boundaries of the target dataspace -- hence the following - * code to manually clip the selection back to the dataspace - * proper. 
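/*
 * [Editor's sketch -- not part of the original patch.]  The clipping step
 * the "weirdness alert" above refers to, in isolation: AND-ing the current
 * selection with a single hyperslab that covers the entire extent discards
 * any part of the selection that spilled past the dataspace boundary.
 * Assumes rank <= SS_DR_MAX_RANK; the helper name is hypothetical.
 */
static herr_t
sketch_clip_selection_to_extent(hid_t sid, int rank, hsize_t edge_size)
{
    hsize_t start[SS_DR_MAX_RANK];
    hsize_t stride[SS_DR_MAX_RANK];
    hsize_t count[SS_DR_MAX_RANK];
    hsize_t block[SS_DR_MAX_RANK];

    for (int i = 0; i < rank; i++) {
        start[i]  = 0;
        stride[i] = edge_size; /* irrelevant when count is 1; kept for symmetry */
        count[i]  = 1;
        block[i]  = edge_size; /* one block spanning the whole dimension */
    }

    return H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block);
}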
- */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - start[i] = 0; - stride[i] = edge_size; - count[i] = 1; - block[i] = edge_size; - } /* end for */ - - ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* setup is done -- run the test: */ - check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid); - VERIFY(check, expected_result, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(n_cube_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(n_cube_1_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_shape_same_dr__checkerboard() */ - -/**************************************************************** -** -** test_shape_same_dr__run_checkerboard_tests(): -** -** In this set of tests, we test H5Sselect_shape_same() -** with a "checkerboard" selection of 1, 2, 3, and 4 cubes as -** one parameter, and 1, 2, 3, and 4 dimensional checkerboard -** slices through a n-cube of rank no more than 5 (and at -** least the rank of the slice). -** -** All the n-cubes will have lengths of the same size, so -** H5Sselect_shape_same() should return true iff: -** -** 1) the rank of the n cube equals the number of dimensions -** selected in the checker board slice through the m-cube -** (m >= n). -** -** 2) The dimensions selected in the checkerboard slice -** through the m-cube are the dimensions with the most -** quickly changing indices. -** -****************************************************************/ -static void -test_shape_same_dr__run_checkerboard_tests(void) -{ - bool dim_selected[5]; - bool expected_result; - int i, j; - int v, w, x, y, z; - int test_num = 0; - int small_rank; - int large_rank; - - for (large_rank = 1; large_rank <= 5; large_rank++) { - for (small_rank = 1; small_rank <= large_rank; small_rank++) { - v = 0; - do { - if (v == 0) - dim_selected[0] = false; - else - dim_selected[0] = true; - - w = 0; - do { - if (w == 0) - dim_selected[1] = false; - else - dim_selected[1] = true; - - x = 0; - do { - if (x == 0) - dim_selected[2] = false; - else - dim_selected[2] = true; - - y = 0; - do { - if (y == 0) - dim_selected[3] = false; - else - dim_selected[3] = true; - - z = 0; - do { - if (z == 0) - dim_selected[4] = false; - else - dim_selected[4] = true; - - /* compute the expected result: */ - i = 0; - j = 4; - expected_result = true; - while ((i < small_rank) && expected_result) { - if (!dim_selected[j]) - expected_result = false; - i++; - j--; - } /* end while */ - - while ((i < large_rank) && expected_result) { - if (dim_selected[j]) - expected_result = false; - i++; - j--; - } /* end while */ - - /* everything is set up -- run the tests */ - - /* run test with edge size 16, checker - * size 1, and a variety of offsets - */ - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 0, - /* edge_size */ 16, - /* checker_size */ 1, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 5, - /* edge_size */ 16, - /* checker_size */ 1, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 15, - /* edge_size */ 16, - /* checker_size */ 1, dim_selected, - expected_result); - - /* run test with edge size 10, checker - * size 2, and a variety of offsets - */ - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 0, - /* edge_size */ 10, - /* checker_size */ 2, dim_selected, - expected_result); - - 
test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 5, - /* edge_size */ 10, - /* checker_size */ 2, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 9, - /* edge_size */ 10, - /* checker_size */ 2, dim_selected, - expected_result); - - /* run test with edge size 10, checker - * size 3, and a variety of offsets - */ - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 0, - /* edge_size */ 10, - /* checker_size */ 3, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 5, - /* edge_size */ 10, - /* checker_size */ 3, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 9, - /* edge_size */ 10, - /* checker_size */ 3, dim_selected, - expected_result); - - /* run test with edge size 8, checker - * size 8, and a variety of offsets - */ - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 0, - /* edge_size */ 8, - /* checker_size */ 8, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 4, - /* edge_size */ 8, - /* checker_size */ 8, dim_selected, - expected_result); - - test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank, - /* offset */ 7, - /* edge_size */ 8, - /* checker_size */ 8, dim_selected, - expected_result); - - z++; - } while ((z < 2) && (large_rank >= 1)); - - y++; - } while ((y < 2) && (large_rank >= 2)); - - x++; - } while ((x < 2) && (large_rank >= 3)); - - w++; - } while ((w < 2) && (large_rank >= 4)); - - v++; - } while ((v < 2) && (large_rank >= 5)); - } /* end for */ - } /* end for */ -} /* test_shape_same_dr__run_checkerboard_tests() */ - -/**************************************************************** -** -** test_shape_same_dr__irregular(): -** -** Tests selection of an "irregular" subset of a full -** n-cube dataspace vs an identical "irregular" subset -** of an n-dimensional slice of an m-cube (m > n). -** in a call to H5Sselect_shape_same(). -** -** Note that this test does not require the n-cube and the -** n-dimensional slice to have the same rank (although -** H5Sselect_shape_same() should always return false if -** they don't). -** -****************************************************************/ -static void -test_shape_same_dr__irregular(int test_num, int small_rank, int large_rank, int pattern_offset, - int slice_offset, bool dim_selected[], bool expected_result) -{ - char test_desc_0[128]; - char test_desc_1[256]; - int edge_size = 10; - int i; - int j; - int k; - int dims_selected = 0; - hid_t n_cube_0_sid; /* the hyper cube containing - * an irregular selection - */ - hid_t n_cube_1_sid; /* the hyper cube in which a - * slice contains an irregular - * selection. 
- */ - hsize_t dims[SS_DR_MAX_RANK]; - hsize_t start_0[SS_DR_MAX_RANK] = {2, 2, 2, 2, 5}; - hsize_t stride_0[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - hsize_t count_0[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; - hsize_t block_0[SS_DR_MAX_RANK] = {2, 2, 2, 2, 3}; - - hsize_t start_1[SS_DR_MAX_RANK] = {2, 2, 2, 5, 2}; - hsize_t stride_1[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - hsize_t count_1[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; - hsize_t block_1[SS_DR_MAX_RANK] = {2, 2, 2, 3, 2}; - - hsize_t start_2[SS_DR_MAX_RANK] = {2, 2, 5, 2, 2}; - hsize_t stride_2[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - hsize_t count_2[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; - hsize_t block_2[SS_DR_MAX_RANK] = {2, 2, 3, 2, 2}; - - hsize_t start_3[SS_DR_MAX_RANK] = {2, 5, 2, 2, 2}; - hsize_t stride_3[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - hsize_t count_3[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; - hsize_t block_3[SS_DR_MAX_RANK] = {2, 3, 2, 2, 2}; - - hsize_t start_4[SS_DR_MAX_RANK] = {5, 2, 2, 2, 2}; - hsize_t stride_4[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - hsize_t count_4[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; - hsize_t block_4[SS_DR_MAX_RANK] = {3, 2, 2, 2, 2}; - - hsize_t clip_start[SS_DR_MAX_RANK] = {0, 0, 0, 0, 0}; - hsize_t clip_stride[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - hsize_t clip_count[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1}; - hsize_t clip_block[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10}; - - hsize_t *(starts[SS_DR_MAX_RANK]) = {start_0, start_1, start_2, start_3, start_4}; - hsize_t *(strides[SS_DR_MAX_RANK]) = {stride_0, stride_1, stride_2, stride_3, stride_4}; - hsize_t *(counts[SS_DR_MAX_RANK]) = {count_0, count_1, count_2, count_3, count_4}; - hsize_t *(blocks[SS_DR_MAX_RANK]) = {block_0, block_1, block_2, block_3, block_4}; - - hsize_t start[SS_DR_MAX_RANK]; - hsize_t *start_ptr; - hsize_t stride[SS_DR_MAX_RANK]; - hsize_t *stride_ptr; - hsize_t count[SS_DR_MAX_RANK]; - hsize_t *count_ptr; - hsize_t block[SS_DR_MAX_RANK]; - hsize_t *block_ptr; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - assert(0 < small_rank); - assert(small_rank <= large_rank); - assert(large_rank <= SS_DR_MAX_RANK); - assert(9 <= edge_size); - assert(edge_size <= 1000); - assert(0 <= slice_offset); - assert(slice_offset < edge_size); - assert(-2 <= pattern_offset); - assert(pattern_offset <= 2); - - for (i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++) - if (dim_selected[i] == true) - dims_selected++; - - assert(dims_selected >= 0); - assert(dims_selected <= large_rank); - - snprintf(test_desc_0, sizeof(test_desc_0), - "\tirregular sub set of n-cube slice through m-cube (n <= m) test %d.\n", test_num); - MESSAGE(7, ("%s", test_desc_0)); - - /* This statement must be updated if SS_DR_MAX_RANK is changed */ - snprintf(test_desc_1, sizeof(test_desc_1), - "\tranks: %d/%d edge: %d s/p offset: %d/%d dim_selected: %d/%d/%d/%d/%d:%d.\n", small_rank, - large_rank, edge_size, slice_offset, pattern_offset, (int)dim_selected[0], (int)dim_selected[1], - (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4], dims_selected); - MESSAGE(7, ("%s", test_desc_1)); - - /* copy the edge size into the dims array */ - for (i = 0; i < SS_DR_MAX_RANK; i++) - dims[i] = (hsize_t)edge_size; - - /* Create the small n-cube */ - n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL); - CHECK(n_cube_0_sid, FAIL, "H5Screate_simple"); - - /* Select an "irregular" pattern in the small n-cube. 
This - * pattern can be though of a set of four 3 x 2 x 2 X 2 - * four dimensional prisims, each parallel to one of the - * axies and none of them intersecting with the other. - * - * In the lesser dimensional cases, this 4D pattern is - * projected onto the lower dimensional space. - * - * In the 1-D case, the projection of the pattern looks - * like this: - * - * - - * * - * * * - - - * 0 1 2 3 4 5 6 7 8 9 x - * - * and in the 2-D case, it would look like this: - * - * - * y - * 9 - - - - - - - - - - - * 8 - - - - - - - - - - - * 7 - - * * - - - - - - - * 6 - - * * - - - - - - - * 5 - - * * - - - - - - - * 4 - - - - - - - - - - - * 3 - - * * - * * * - - - * 2 - - * * - * * * - - - * 1 - - - - - - - - - - - * 0 - - - - - - - - - - - * 0 1 2 3 4 5 6 7 8 9 x - * - * In both cases, asterisks indicate selected elements, - * and dashes indicate unselected elements. - * - * Note that is this case, since the edge size is fixed, - * the pattern does not change. However, we do use the - * displacement parameter to allow it to be moved around - * within the n-cube or hyperslab. - */ - - /* first, ensure that the small n-cube has no selection */ - ret = H5Sselect_none(n_cube_0_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* now, select the irregular pattern */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR, starts[i], strides[i], counts[i], blocks[i]); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end for */ - - /* finally, clip the selection to ensure that it lies fully - * within the n-cube. - */ - ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND, clip_start, clip_stride, clip_count, clip_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create the large n-cube */ - n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL); - CHECK(n_cube_1_sid, FAIL, "H5Screate_simple"); - - /* Ensure that the large n-cube has no selection */ - H5Sselect_none(n_cube_1_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Since large rank may be less than SS_DR_MAX_RANK, we may not - * use the entire start, stride, count, and block arrays. This - * is a problem, since it is inconvenient to set up the dim_selected - * array to reflect the large rank, and thus if large_rank < - * SS_DR_MAX_RANK, we need to hide the lower index entries - * from H5Sselect_hyperslab(). - * - * Do this by setting up pointers to the first valid entry in start, - * stride, count, and block below, and pass these pointers in - * to H5Sselect_hyperslab() instead of the array base addresses. - */ - - i = SS_DR_MAX_RANK - large_rank; - assert(i >= 0); - - start_ptr = &(start[i]); - stride_ptr = &(stride[i]); - count_ptr = &(count[i]); - block_ptr = &(block[i]); - - /* Now select the irregular selection in the (possibly larger) n-cube. - * - * Basic idea is to project the pattern used in the smaller n-cube - * onto the dimensions selected in the larger n-cube, with the displacement - * specified. 
- */ - for (i = 0; i < SS_DR_MAX_RANK; i++) { - j = 0; - for (k = 0; k < SS_DR_MAX_RANK; k++) { - if (dim_selected[k]) { - start[k] = (starts[i])[j] + (hsize_t)pattern_offset; - stride[k] = (strides[i])[j]; - count[k] = (counts[i])[j]; - block[k] = (blocks[i])[j]; - j++; - } /* end if */ - else { - start[k] = (hsize_t)slice_offset; - stride[k] = (hsize_t)(2 * edge_size); - count[k] = 1; - block[k] = 1; - } /* end else */ - } /* end for */ - - /* select the hyperslab */ - ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR, start_ptr, stride_ptr, count_ptr, block_ptr); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - } /* end for */ - - /* it is possible that the selection extends beyond the dataspace. - * clip the selection to ensure that it doesn't. - */ - ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND, clip_start, clip_stride, clip_count, clip_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* setup is done -- run the test: */ - check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid); - VERIFY(check, expected_result, "H5Sselect_shape_same"); - - /* Close dataspaces */ - ret = H5Sclose(n_cube_0_sid); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(n_cube_1_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_shape_same_dr__irregular() */ - -/**************************************************************** -** -** test_shape_same_dr__run_irregular_tests(): -** -** In this set of tests, we test H5Sselect_shape_same() -** with an "irregular" subselection of 1, 2, 3, and 4 cubes as -** one parameter, and irregular subselections of 1, 2, 3, -** and 4 dimensional slices through a n-cube of rank no more -** than 5 (and at least the rank of the slice) as the other. -** Note that the "irregular" selection may be offset between -** the n-cube and the slice. -** -** All the irregular selections will be identical (modulo rank) -** so H5Sselect_shape_same() should return true iff: -** -** 1) the rank of the n cube equals the number of dimensions -** selected in the irregular slice through the m-cube -** (m >= n). -** -** 2) The dimensions selected in the irregular slice -** through the m-cube are the dimensions with the most -** quickly changing indices. 
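/*
 * Illustrative sketch of the comparison described above (not part of
 * the test driver): the same 3 x 4 block selected in a 2-D dataspace
 * and in the two fastest-changing dimensions of a 3-D dataspace is
 * expected to compare as the same shape.  Extent sizes are made up
 * for the sketch; assumes <hdf5.h>, error checking omitted.
 */
static htri_t
shape_same_across_ranks_sketch(void)
{
    hsize_t dims2[2]  = {10, 10};
    hsize_t dims3[3]  = {10, 10, 10};
    hsize_t start2[2] = {2, 2}, count2[2] = {1, 1}, block2[2] = {3, 4};
    hsize_t start3[3] = {5, 2, 2}, count3[3] = {1, 1, 1}, block3[3] = {1, 3, 4};
    hid_t   sid2, sid3;
    htri_t  same;

    sid2 = H5Screate_simple(2, dims2, NULL);
    sid3 = H5Screate_simple(3, dims3, NULL);

    /* 3 x 4 block in the 2-D space */
    H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start2, NULL, count2, block2);

    /* Same 3 x 4 block in a thickness-1 slice (plane 5) of the 3-D space */
    H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start3, NULL, count3, block3);

    same = H5Sselect_shape_same(sid2, sid3); /* expected: > 0 (same shape) */

    H5Sclose(sid2);
    H5Sclose(sid3);
    return same;
}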
-** -****************************************************************/ -static void -test_shape_same_dr__run_irregular_tests(void) -{ - bool dim_selected[5]; - bool expected_result; - int i, j; - int v, w, x, y, z; - int test_num = 0; - int small_rank; - int large_rank; - - for (large_rank = 1; large_rank <= 5; large_rank++) { - for (small_rank = 1; small_rank <= large_rank; small_rank++) { - v = 0; - do { - if (v == 0) - dim_selected[0] = false; - else - dim_selected[0] = true; - - w = 0; - do { - if (w == 0) - dim_selected[1] = false; - else - dim_selected[1] = true; - - x = 0; - do { - if (x == 0) - dim_selected[2] = false; - else - dim_selected[2] = true; - - y = 0; - do { - if (y == 0) - dim_selected[3] = false; - else - dim_selected[3] = true; - - z = 0; - do { - if (z == 0) - dim_selected[4] = false; - else - dim_selected[4] = true; - - /* compute the expected result: */ - i = 0; - j = 4; - expected_result = true; - while ((i < small_rank) && expected_result) { - if (!dim_selected[j]) - expected_result = false; - i++; - j--; - } /* end while */ - - while ((i < large_rank) && expected_result) { - if (dim_selected[j]) - expected_result = false; - i++; - j--; - } /* end while */ - - /* everything is set up -- run the tests */ - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ -2, - /* slice_offset */ 0, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ -2, - /* slice_offset */ 4, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ -2, - /* slice_offset */ 9, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ 0, - /* slice_offset */ 0, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ 0, - /* slice_offset */ 6, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ 0, - /* slice_offset */ 9, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ 2, - /* slice_offset */ 0, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ 2, - /* slice_offset */ 5, dim_selected, - expected_result); - - test_shape_same_dr__irregular(test_num++, small_rank, large_rank, - /* pattern_offset */ 2, - /* slice_offset */ 9, dim_selected, - expected_result); - - z++; - } while ((z < 2) && (large_rank >= 1)); - - y++; - } while ((y < 2) && (large_rank >= 2)); - - x++; - } while ((x < 2) && (large_rank >= 3)); - - w++; - } while ((w < 2) && (large_rank >= 4)); - - v++; - } while ((v < 2) && (large_rank >= 5)); - } /* end for */ - } /* end for */ -} /* test_shape_same_dr__run_irregular_tests() */ - -/**************************************************************** -** -** test_shape_same_dr(): Tests selections on dataspace with -** different ranks, to verify that "shape same" routine -** is now handling this case correctly. 
-** -****************************************************************/ -static void -test_shape_same_dr(void) -{ - /* Output message about test being performed */ - MESSAGE(6, ("Testing Same Shape/Different Rank Comparisons\n")); - - /* first run some smoke checks */ - test_shape_same_dr__smoke_check_1(); - test_shape_same_dr__smoke_check_2(); - test_shape_same_dr__smoke_check_3(); - test_shape_same_dr__smoke_check_4(); - - /* now run more intensive tests. */ - test_shape_same_dr__run_full_space_vs_slice_tests(); - test_shape_same_dr__run_checkerboard_tests(); - test_shape_same_dr__run_irregular_tests(); -} /* test_shape_same_dr() */ - -/**************************************************************** -** -** test_space_rebuild(): Tests selection rebuild routine, -** We will test whether selection in span-tree form can be rebuilt -** into a regular selection. -** -** -****************************************************************/ -static void -test_space_rebuild(void) -{ - /* regular space IDs in span-tree form */ - hid_t sid_reg1, sid_reg2, sid_reg3, sid_reg4, sid_reg5; - - /* Original regular Space IDs */ - hid_t sid_reg_ori1, sid_reg_ori2, sid_reg_ori3, sid_reg_ori4, sid_reg_ori5; - - /* Irregular space IDs */ - hid_t sid_irreg1, sid_irreg2, sid_irreg3, sid_irreg4, sid_irreg5; - - /* rebuild status state */ -#if 0 - H5S_diminfo_valid_t rebuild_stat1, rebuild_stat2; - htri_t rebuild_check; -#endif - herr_t ret; - - /* dimensions of rank 1 to rank 5 */ - hsize_t dims1[] = {SPACERE1_DIM0}; - hsize_t dims2[] = {SPACERE2_DIM0, SPACERE2_DIM1}; - hsize_t dims3[] = {SPACERE3_DIM0, SPACERE3_DIM1, SPACERE3_DIM2}; - hsize_t dims4[] = {SPACERE4_DIM0, SPACERE4_DIM1, SPACERE4_DIM2, SPACERE4_DIM3}; - hsize_t dims5[] = {SPACERE5_DIM0, SPACERE5_DIM1, SPACERE5_DIM2, SPACERE5_DIM3, SPACERE5_DIM4}; - - /* The start of the hyperslab */ - hsize_t start1[SPACERE1_RANK], start2[SPACERE2_RANK], start3[SPACERE3_RANK], start4[SPACERE4_RANK], - start5[SPACERE5_RANK]; - - /* The stride of the hyperslab */ - hsize_t stride1[SPACERE1_RANK], stride2[SPACERE2_RANK], stride3[SPACERE3_RANK], stride4[SPACERE4_RANK], - stride5[SPACERE5_RANK]; - - /* The number of blocks for the hyperslab */ - hsize_t count1[SPACERE1_RANK], count2[SPACERE2_RANK], count3[SPACERE3_RANK], count4[SPACERE4_RANK], - count5[SPACERE5_RANK]; - - /* The size of each block for the hyperslab */ - hsize_t block1[SPACERE1_RANK], block2[SPACERE2_RANK], block3[SPACERE3_RANK], block4[SPACERE4_RANK], - block5[SPACERE5_RANK]; - - /* Declarations for special test of rebuild */ - hid_t sid_spec; - - /* Output message about test being performed */ - MESSAGE(6, ("Testing functionality to rebuild regular hyperslab selection\n")); - - MESSAGE(7, ("Testing functionality to rebuild 1-D hyperslab selection\n")); - - /* Create 1-D dataspace */ - sid_reg1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL); - sid_reg_ori1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL); - - /* Build up the original one dimensional regular selection */ - start1[0] = 1; - count1[0] = 3; - stride1[0] = 5; - block1[0] = 4; - ret = H5Sselect_hyperslab(sid_reg_ori1, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Build up one dimensional regular selection with H5_SELECT_OR, - inside HDF5, it will be treated as an irregular selection. 
*/ - - start1[0] = 1; - count1[0] = 2; - stride1[0] = 5; - block1[0] = 4; - ret = H5Sselect_hyperslab(sid_reg1, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start1[0] = 11; - count1[0] = 1; - stride1[0] = 5; - block1[0] = 4; - ret = H5Sselect_hyperslab(sid_reg1, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_reg1, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 and rebuild_stat2 should be - * H5S_DIMINFO_VALID_YES. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (ret != FAIL) { - /* In this case, rebuild_check should be true. */ - rebuild_check = H5Sselect_shape_same(sid_reg1, sid_reg_ori1); - CHECK(rebuild_check, false, "H5Sselect_shape_same"); - } -#endif - /* For irregular hyperslab */ - sid_irreg1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL); - - /* Build up one dimensional irregular selection with H5_SELECT_OR */ - start1[0] = 1; - count1[0] = 2; - stride1[0] = 5; - block1[0] = 4; - ret = H5Sselect_hyperslab(sid_irreg1, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start1[0] = 12; /* Just one position switch */ - count1[0] = 1; - stride1[0] = 5; - block1[0] = 4; - ret = H5Sselect_hyperslab(sid_irreg1, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_irreg1, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - MESSAGE(7, ("Testing functionality to rebuild 2-D hyperslab selection\n")); - /* Create 2-D dataspace */ - sid_reg2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL); - sid_reg_ori2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL); - - /* Build up the original two dimensional regular selection */ - start2[0] = 2; - count2[0] = 2; - stride2[0] = 7; - block2[0] = 5; - start2[1] = 1; - count2[1] = 3; - stride2[1] = 3; - block2[1] = 2; - - ret = H5Sselect_hyperslab(sid_reg_ori2, H5S_SELECT_SET, start2, stride2, count2, block2); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Build up two dimensional regular selection with H5_SELECT_OR, inside HDF5, - it will be treated as an irregular selection. 
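/*
 * Illustrative sketch: H5Sget_select_npoints() and
 * H5Sget_select_bounds() give a quick, public-API sanity check on the
 * 2-D pattern built above (rows: start 2/stride 7/count 2/block 5,
 * columns: start 1/stride 3/count 3/block 2).  The dataspace extent
 * is made up for the sketch; assumes <hdf5.h>.
 */
static void
selection_sanity_sketch(void)
{
    hsize_t  dims[2]   = {20, 12};
    hsize_t  start[2]  = {2, 1};
    hsize_t  stride[2] = {7, 3};
    hsize_t  count[2]  = {2, 3};
    hsize_t  block[2]  = {5, 2};
    hsize_t  low[2], high[2];
    hssize_t npoints;
    hid_t    sid;

    sid = H5Screate_simple(2, dims, NULL);
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    npoints = H5Sget_select_npoints(sid); /* (2 * 5) * (3 * 2) = 60 elements */
    H5Sget_select_bounds(sid, low, high); /* low = {2, 1}, high = {13, 8} */
    (void)npoints;

    H5Sclose(sid);
}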
*/ - - start2[1] = 1; - count2[1] = 2; - stride2[1] = 3; - block2[1] = 2; - - ret = H5Sselect_hyperslab(sid_reg2, H5S_SELECT_SET, start2, stride2, count2, block2); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start2[1] = 7; /* 7 = start(1) + count(2) * stride(3) */ - count2[1] = 1; - stride2[1] = 3; - block2[1] = 2; - - ret = H5Sselect_hyperslab(sid_reg2, H5S_SELECT_OR, start2, stride2, count2, block2); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_reg2, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 and rebuild_stat2 should be - * H5S_DIMINFO_VALID_YES. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } /* end if */ - if (ret != FAIL) { - /* In this case, rebuild_check should be true. */ - rebuild_check = H5Sselect_shape_same(sid_reg2, sid_reg_ori2); - CHECK(rebuild_check, false, "H5Sselect_shape_same"); - } -#endif - /* 2-D irregular case */ - sid_irreg2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL); - /* Build up two dimensional irregular selection with H5_SELECT_OR */ - - start2[0] = 2; - count2[0] = 2; - stride2[0] = 7; - block2[0] = 5; - start2[1] = 1; - count2[1] = 1; - stride2[1] = 3; - block2[1] = 2; - ret = H5Sselect_hyperslab(sid_irreg2, H5S_SELECT_SET, start2, stride2, count2, block2); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start2[1] = 4; - count2[1] = 2; - stride2[1] = 4; - block2[1] = 3; /* Just add one element for the block */ - - ret = H5Sselect_hyperslab(sid_irreg2, H5S_SELECT_OR, start2, stride2, count2, block2); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_irreg2, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - MESSAGE(7, ("Testing functionality to rebuild 3-D hyperslab selection\n")); - - /* Create 3-D dataspace */ - sid_reg3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL); - sid_reg_ori3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL); - - /* Build up the original three dimensional regular selection */ - start3[0] = 2; - count3[0] = 2; - stride3[0] = 3; - block3[0] = 2; - start3[1] = 1; - count3[1] = 3; - stride3[1] = 3; - block3[1] = 2; - - start3[2] = 1; - count3[2] = 2; - stride3[2] = 4; - block3[2] = 2; - - ret = H5Sselect_hyperslab(sid_reg_ori3, H5S_SELECT_SET, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Build up three dimensional regular selection with H5_SELECT_OR, inside HDF5, - it will be treated as an irregular selection. 
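/*
 * Illustrative sketch: a regular selection built in one H5S_SELECT_SET
 * call and the equivalent one built with SET followed by OR describe
 * the same elements, so H5Sselect_shape_same() reports them as the
 * same shape.  This is the user-visible equivalence the disabled
 * rebuild_check comparisons probe at each rank; shown here in 1-D
 * with made-up extents, assuming <hdf5.h>.
 */
static htri_t
same_shape_two_ways_sketch(void)
{
    hsize_t dims[1]   = {20};
    hsize_t stride[1] = {5};
    hsize_t block[1]  = {4};
    hsize_t start[1], count[1];
    hid_t   sid_a, sid_b;
    htri_t  same;

    sid_a = H5Screate_simple(1, dims, NULL);
    sid_b = H5Screate_simple(1, dims, NULL);

    /* One shot: blocks at 1-4, 6-9, 11-14 */
    start[0] = 1; count[0] = 3;
    H5Sselect_hyperslab(sid_a, H5S_SELECT_SET, start, stride, count, block);

    /* Same elements in two pieces */
    start[0] = 1;  count[0] = 2;
    H5Sselect_hyperslab(sid_b, H5S_SELECT_SET, start, stride, count, block);
    start[0] = 11; count[0] = 1;
    H5Sselect_hyperslab(sid_b, H5S_SELECT_OR, start, stride, count, block);

    same = H5Sselect_shape_same(sid_a, sid_b); /* expected: > 0 (true) */

    H5Sclose(sid_a);
    H5Sclose(sid_b);
    return same;
}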
*/ - start3[2] = 1; - count3[2] = 1; - stride3[2] = 4; - block3[2] = 2; - - ret = H5Sselect_hyperslab(sid_reg3, H5S_SELECT_SET, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start3[2] = 5; - count3[2] = 1; - stride3[2] = 4; - block3[2] = 2; - - ret = H5Sselect_hyperslab(sid_reg3, H5S_SELECT_OR, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_reg3, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 and rebuild_stat2 should be - * H5S_DIMINFO_VALID_YES. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (ret != FAIL) { - /* In this case, rebuild_check should be true. */ - rebuild_check = H5Sselect_shape_same(sid_reg3, sid_reg_ori3); - CHECK(rebuild_check, false, "H5Sselect_shape_same"); - } -#endif - - sid_irreg3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL); - - /* Build up three dimensional irregular selection with H5_SELECT_OR */ - start3[0] = 2; - count3[0] = 2; - stride3[0] = 3; - block3[0] = 2; - start3[1] = 1; - count3[1] = 3; - stride3[1] = 3; - block3[1] = 2; - - start3[2] = 1; - count3[2] = 2; - stride3[2] = 2; - block3[2] = 1; - - ret = H5Sselect_hyperslab(sid_irreg3, H5S_SELECT_SET, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start3[2] = 3; - count3[2] = 2; - stride3[2] = 3; /* Just add one element for the stride */ - block3[2] = 1; - - ret = H5Sselect_hyperslab(sid_irreg3, H5S_SELECT_OR, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_irreg3, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - MESSAGE(7, ("Testing functionality to rebuild 4-D hyperslab selection\n")); - - /* Create 4-D dataspace */ - sid_reg4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL); - sid_reg_ori4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL); - - /* Build up the original four dimensional regular selection */ - start4[0] = 2; - count4[0] = 2; - stride4[0] = 3; - block4[0] = 2; - - start4[1] = 1; - count4[1] = 3; - stride4[1] = 3; - block4[1] = 2; - - start4[2] = 1; - count4[2] = 2; - stride4[2] = 4; - block4[2] = 2; - - start4[3] = 1; - count4[3] = 2; - stride4[3] = 4; - block4[3] = 2; - - ret = H5Sselect_hyperslab(sid_reg_ori4, H5S_SELECT_SET, start4, stride4, count4, block4); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Build up four dimensional regular selection with H5_SELECT_OR, inside HDF5, - it will be treated as an irregular selection. 
*/ - start4[3] = 1; - count4[3] = 1; - stride4[3] = 4; - block4[3] = 2; - - ret = H5Sselect_hyperslab(sid_reg4, H5S_SELECT_SET, start4, stride4, count4, block4); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start4[3] = 5; - count4[3] = 1; - stride4[3] = 4; - block4[3] = 2; - - ret = H5Sselect_hyperslab(sid_reg4, H5S_SELECT_OR, start4, stride4, count4, block4); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - -#if 0 - ret = H5S__get_rebuild_status_test(sid_reg4, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 and rebuild_stat2 should be - * H5S_DIMINFO_VALID_YES. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (ret != FAIL) { - /* In this case, rebuild_check should be true. */ - rebuild_check = H5Sselect_shape_same(sid_reg4, sid_reg_ori4); - CHECK(rebuild_check, false, "H5Sselect_shape_same"); - } -#endif - - /* Testing irregular selection */ - sid_irreg4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL); - - /* Build up four dimensional irregular selection with H5_SELECT_OR */ - start4[0] = 2; - count4[0] = 2; - stride4[0] = 3; - block4[0] = 2; - start4[1] = 1; - count4[1] = 3; - stride4[1] = 3; - block4[1] = 2; - - start4[2] = 1; - count4[2] = 1; - stride4[2] = 4; - block4[2] = 2; - - start4[3] = 1; - count4[3] = 2; - stride4[3] = 4; - block4[3] = 2; /* sub-block is one element difference */ - - ret = H5Sselect_hyperslab(sid_irreg4, H5S_SELECT_SET, start4, stride4, count4, block4); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start4[2] = 5; - count4[2] = 1; - stride4[2] = 4; - block4[2] = 2; - - start4[3] = 1; - count4[3] = 2; - stride4[3] = 4; - block4[3] = 3; /* sub-block is one element difference */ - - ret = H5Sselect_hyperslab(sid_irreg4, H5S_SELECT_OR, start4, stride4, count4, block4); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_irreg4, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - MESSAGE(7, ("Testing functionality to rebuild 5-D hyperslab selection\n")); - - /* Create 5-D dataspace */ - sid_reg5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL); - sid_reg_ori5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL); - - /* Build up the original five dimensional regular selection */ - start5[0] = 2; - count5[0] = 2; - stride5[0] = 3; - block5[0] = 2; - - start5[1] = 1; - count5[1] = 3; - stride5[1] = 3; - block5[1] = 2; - - start5[2] = 1; - count5[2] = 2; - stride5[2] = 4; - block5[2] = 2; - - start5[3] = 1; - count5[3] = 2; - stride5[3] = 4; - block5[3] = 2; - - start5[4] = 1; - count5[4] = 2; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_reg_ori5, H5S_SELECT_SET, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Build up five dimensional regular selection with H5_SELECT_OR, inside HDF5, - it will be treated as an irregular selection. 
*/ - start5[4] = 1; - count5[4] = 1; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_reg5, H5S_SELECT_SET, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start5[4] = 5; - count5[4] = 1; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_reg5, H5S_SELECT_OR, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - -#if 0 - ret = H5S__get_rebuild_status_test(sid_reg5, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 and rebuild_stat2 should be - * H5S_DIMINFO_VALID_YES. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (ret != FAIL) { - /* In this case, rebuild_check should be true. */ - rebuild_check = H5Sselect_shape_same(sid_reg5, sid_reg_ori5); - CHECK(rebuild_check, false, "H5Sselect_shape_same"); - } -#endif - - sid_irreg5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL); - - /* Build up five dimensional irregular selection with H5_SELECT_OR */ - start5[0] = 2; - count5[0] = 2; - stride5[0] = 3; - block5[0] = 2; - - start5[1] = 1; - count5[1] = 3; - stride5[1] = 3; - block5[1] = 2; - - start5[2] = 1; - count5[2] = 2; - stride5[2] = 4; - block5[2] = 2; - - start5[3] = 1; - count5[3] = 1; - stride5[3] = 4; - block5[3] = 2; - - start5[4] = 2; /* One element difference */ - count5[4] = 1; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_irreg5, H5S_SELECT_SET, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start5[3] = 5; - count5[3] = 1; - stride5[3] = 4; - block5[3] = 2; - - start5[4] = 1; /* One element difference */ - count5[4] = 2; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_irreg5, H5S_SELECT_OR, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_irreg5, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - /* We use 5-D to test a special case with - rebuilding routine true, false and true */ - sid_spec = H5Screate_simple(SPACERE5_RANK, dims5, NULL); - - /* Build up the original five dimensional regular selection */ - start5[0] = 2; - count5[0] = 2; - stride5[0] = 3; - block5[0] = 2; - - start5[1] = 1; - count5[1] = 3; - stride5[1] = 3; - block5[1] = 2; - - start5[2] = 1; - count5[2] = 2; - stride5[2] = 4; - block5[2] = 2; - - start5[3] = 1; - count5[3] = 2; - stride5[3] = 4; - block5[3] = 2; - - start5[4] = 1; - count5[4] = 1; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_SET, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 and rebuild_stat2 should both be - * H5S_DIMINFO_VALID_YES. 
*/ - if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - /* Adding some selections to make it real irregular */ - start5[3] = 1; - count5[3] = 1; - stride5[3] = 4; - block5[3] = 2; - - start5[4] = 5; - count5[4] = 1; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_OR, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - /* Add more selections to make it regular again */ - start5[3] = 5; - count5[3] = 1; - stride5[3] = 4; - block5[3] = 2; - - start5[4] = 5; - count5[4] = 1; - stride5[4] = 4; - block5[4] = 2; - - ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_OR, start5, stride5, count5, block5); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and - * rebuild_stat2 should be H5S_DIMINFO_VALID_YES. */ - if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } - /* No need to do shape comparison */ -#endif - - H5Sclose(sid_reg1); - CHECK(ret, FAIL, "H5Sclose"); - H5Sclose(sid_irreg1); - CHECK(ret, FAIL, "H5Sclose"); - - H5Sclose(sid_reg2); - CHECK(ret, FAIL, "H5Sclose"); - H5Sclose(sid_irreg2); - CHECK(ret, FAIL, "H5Sclose"); - - H5Sclose(sid_reg3); - CHECK(ret, FAIL, "H5Sclose"); - H5Sclose(sid_irreg3); - CHECK(ret, FAIL, "H5Sclose"); - - H5Sclose(sid_reg4); - CHECK(ret, FAIL, "H5Sclose"); - H5Sclose(sid_irreg4); - CHECK(ret, FAIL, "H5Sclose"); - - H5Sclose(sid_reg5); - CHECK(ret, FAIL, "H5Sclose"); - H5Sclose(sid_irreg5); - CHECK(ret, FAIL, "H5Sclose"); - - H5Sclose(sid_spec); - CHECK(ret, FAIL, "H5Sclose"); -} - -/**************************************************************** -** -** test_space_update_diminfo(): Tests selection diminfo update -** routine. We will test whether regular selections can be -** quickly updated when the selection is modified. 
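/*
 * Illustrative sketch: XOR'ing a hyperslab with a region that exactly
 * covers the current selection removes every selected element, and
 * H5Sget_select_type() then reports H5S_SEL_NONE -- the behavior the
 * 1-D diminfo test below verifies after its "completely overlapping"
 * XOR step.  Extent is made up; assumes <hdf5.h>, error checks omitted.
 */
static void
xor_cancels_sketch(void)
{
    hsize_t      dims[1]  = {32};
    hsize_t      start[1] = {3};
    hsize_t      count[1] = {1};
    hsize_t      block[1] = {4};
    hid_t        sid;
    H5S_sel_type type;

    sid = H5Screate_simple(1, dims, NULL);

    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, block);
    /* XOR with the identical block cancels the whole selection */
    H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, count, block);

    type = H5Sget_select_type(sid); /* expected: H5S_SEL_NONE */
    (void)type;

    H5Sclose(sid);
}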
-** -** -****************************************************************/ -static void -test_space_update_diminfo(void) -{ - hid_t space_id; /* Dataspace id */ -#if 0 - H5S_diminfo_valid_t diminfo_valid; /* Diminfo status */ - H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuild */ -#endif - H5S_sel_type sel_type; /* Selection type */ - herr_t ret; /* Return value */ - - /* dimensions of rank 1 to rank 5 */ - hsize_t dims1[] = {SPACEUD1_DIM0}; - hsize_t dims3[] = {SPACEUD3_DIM0, SPACEUD3_DIM1, SPACEUD3_DIM2}; - - /* The start of the hyperslab */ - hsize_t start1[1], start3[3]; - - /* The stride of the hyperslab */ - hsize_t stride1[1], stride3[3]; - - /* The number of blocks for the hyperslab */ - hsize_t count1[1], count3[3]; - - /* The size of each block for the hyperslab */ - hsize_t block1[1], block3[3]; - - /* Output message about test being performed */ - MESSAGE(6, ("Testing functionality to update hyperslab dimension info\n")); - - MESSAGE(7, ("Testing functionality to update 1-D hyperslab dimension info\n")); - - /* - * Test adding regularly spaced distinct blocks - */ - - /* Create 1-D dataspace */ - space_id = H5Screate_simple(1, dims1, NULL); - - /* Create single block */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block after first, with OR */ - start1[0] = 6; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block before first, this time with XOR */ - start1[0] = 0; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add two blocks after current block */ - start1[0] = 9; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add two blocks overlapping current block, with OR */ - start1[0] = 9; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should 
be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add two blocks partially overlapping current block, with OR */ - start1[0] = 12; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add two blocks partially overlapping current block, with XOR */ - start1[0] = 15; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO, after rebuild it should be IMPOSSIBLE */ - ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ - if (rebuild_status != H5S_DIMINFO_VALID_IMPOSSIBLE) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } /* end if */ -#endif - - /* Fill in missing block */ - start1[0] = 15; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO, after rebuild it should be YES */ - ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ - if (rebuild_status != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } /* end if */ -#endif - /* - * Test adding contiguous blocks - */ - - /* Create single block */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block immediately after first, with OR */ - start1[0] = 5; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block immediately before first, with XOR */ - start1[0] = 1; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - 
/* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add differently size block immediately after current, with OR */ - start1[0] = 7; - count1[0] = 1; - block1[0] = 7; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* - * Test adding overlapping blocks - */ - - /* Create single block */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block completely overlapping first, with OR */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block partially overlapping first, with OR */ - start1[0] = 4; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block completely enclosing current, with OR */ - start1[0] = 2; - count1[0] = 1; - block1[0] = 5; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add block completely enclosed by current, with OR */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add equally sized block partially overlapping current, with 
XOR */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 5; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Fill in hole in block */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 4; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO, after rebuild it should be YES */ - ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ - if (rebuild_status != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } /* end if */ -#endif - - /* Add differently sized block partially overlapping current, with XOR */ - start1[0] = 4; - count1[0] = 1; - block1[0] = 5; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Fill in hole in block */ - start1[0] = 4; - count1[0] = 1; - block1[0] = 4; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO, after rebuild it should be YES */ - ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status); - CHECK(ret, FAIL, "H5S__get_rebuild_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ - if (rebuild_status != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_rebuild"); - } /* end if */ -#endif - - /* Add block completely overlapping current, with XOR */ - start1[0] = 2; - count1[0] = 1; - block1[0] = 7; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - sel_type = H5Sget_select_type(space_id); - VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type"); - - /* - * Test various conditions that break the fast algorithm - */ - - /* Create multiple blocks */ - start1[0] = 3; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create single block with start out of phase */ - start1[0] = 8; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* 
diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks */ - start1[0] = 3; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks with start out of phase */ - start1[0] = 8; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks */ - start1[0] = 3; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks with wrong stride */ - start1[0] = 9; - stride1[0] = 4; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create single block */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create single block with wrong size */ - start1[0] = 6; - count1[0] = 1; - block1[0] = 1; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create single block */ - start1[0] = 3; - count1[0] = 1; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, 
H5S_SELECT_SET, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks with wrong size */ - start1[0] = 6; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 1; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks */ - start1[0] = 3; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create single block with wrong size */ - start1[0] = 9; - count1[0] = 1; - block1[0] = 1; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks */ - start1[0] = 3; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks with wrong size */ - start1[0] = 9; - stride1[0] = 3; - count1[0] = 2; - block1[0] = 1; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - MESSAGE(7, ("Testing functionality to update 3-D hyperslab dimension info\n")); - - /* Create 3-D dataspace */ - space_id = H5Screate_simple(3, dims3, NULL); - - /* Create multiple blocks */ - start3[0] = 0; - start3[1] = 1; - start3[2] = 2; - stride3[0] = 2; - stride3[1] = 3; - stride3[2] = 4; - count3[0] = 4; - count3[1] = 3; - count3[2] = 2; - block3[0] = 1; - block3[1] = 2; - block3[2] = 3; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, 
stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add blocks with same values in all dimensions */ - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add blocks with same values in two dimensions */ - start3[0] = 8; - stride3[0] = 1; - count3[0] = 1; - block3[0] = 1; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks */ - start3[0] = 0; - start3[1] = 1; - start3[2] = 2; - stride3[0] = 2; - stride3[1] = 3; - stride3[2] = 4; - count3[0] = 4; - count3[1] = 3; - count3[2] = 2; - block3[0] = 1; - block3[1] = 2; - block3[2] = 3; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add blocks with same values in one dimension */ - start3[0] = 8; - start3[1] = 10; - stride3[0] = 1; - stride3[1] = 1; - count3[0] = 1; - count3[1] = 1; - block3[0] = 1; - block3[1] = 2; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Create multiple blocks */ - start3[0] = 0; - start3[1] = 1; - start3[2] = 2; - stride3[0] = 2; - stride3[1] = 3; - stride3[2] = 4; - count3[0] = 4; - count3[1] = 3; - count3[2] = 2; - block3[0] = 1; - block3[1] = 2; - block3[2] = 3; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be YES */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_YES) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - /* Add blocks with same values in no dimensions */ - start3[0] = 8; - start3[1] = 10; - start3[2] = 10; - stride3[0] = 1; - stride3[1] = 1; - stride3[2] = 1; - count3[0] = 1; - count3[1] = 1; - count3[2] = 1; - block3[0] = 
1; - block3[1] = 2; - block3[2] = 3; - ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); -#if 0 - /* diminfo_valid should be NO */ - ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid); - CHECK(ret, FAIL, "H5S__get_diminfo_status_test"); - if (diminfo_valid != H5S_DIMINFO_VALID_NO) { - ret = FAIL; - CHECK(ret, FAIL, "H5S_hyper_update_diminfo"); - } /* end if */ -#endif - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); -} /* end test_space_update_diminfo() */ - -/**************************************************************** -** -** test_select_hyper_chunk_offset(): Tests selections on dataspace, -** verify that offsets for hyperslab selections are working in -** chunked datasets. -** -****************************************************************/ -#if 0 -static void -test_select_hyper_chunk_offset(void) -{ - hid_t fid; /* File ID */ - hid_t sid; /* Dataspace ID */ - hid_t msid; /* Memory dataspace ID */ - hid_t did; /* Dataset ID */ - const hsize_t mem_dims[1] = {SPACE10_DIM1}; /* Dataspace dimensions for memory */ - const hsize_t dims[1] = {0}; /* Dataspace initial dimensions */ - const hsize_t maxdims[1] = {H5S_UNLIMITED}; /* Dataspace mam dims */ - int *wbuf; /* Buffer for writing data */ - int *rbuf; /* Buffer for reading data */ - hid_t dcpl; /* Dataset creation property list ID */ - hsize_t chunks[1] = {SPACE10_CHUNK_SIZE}; /* Chunk size */ - hsize_t start[1] = {0}; /* The start of the hyperslab */ - hsize_t count[1] = {SPACE10_CHUNK_SIZE}; /* The size of the hyperslab */ - int i, j; /* Local index */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing hyperslab selections using offsets in chunked datasets\n")); - - /* Allocate buffers */ - wbuf = (int *)malloc(sizeof(int) * SPACE10_DIM1); - CHECK_PTR(wbuf, "malloc"); - rbuf = (int *)calloc(sizeof(int), SPACE10_DIM1); - CHECK_PTR(rbuf, "calloc"); - - /* Initialize the write buffer */ - for (i = 0; i < SPACE10_DIM1; i++) - wbuf[i] = i; - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set to chunked storage layout */ - ret = H5Pset_layout(dcpl, H5D_CHUNKED); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Set the chunk size */ - ret = H5Pset_chunk(dcpl, 1, chunks); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create dataspace for memory */ - msid = H5Screate_simple(1, mem_dims, NULL); - CHECK(msid, FAIL, "H5Screate_simple"); - - /* Select the correct chunk in the memory dataspace */ - ret = H5Sselect_hyperslab(msid, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for dataset */ - sid = H5Screate_simple(1, dims, maxdims); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Create the dataset */ - did = H5Dcreate2(fid, "fooData", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Loop over writing out each chunk */ - for (i = SPACE10_CHUNK_SIZE; i <= SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) { - hssize_t offset[1]; /* Offset of selection */ - hid_t fsid; /* File dataspace ID */ - hsize_t 
size[1]; /* The size to extend the dataset to */ - - /* Extend the dataset */ - size[0] = (hsize_t)i; /* The size to extend the dataset to */ - ret = H5Dset_extent(did, size); - CHECK(ret, FAIL, "H5Dset_extent"); - - /* Get the (extended) dataspace from the dataset */ - fsid = H5Dget_space(did); - CHECK(fsid, FAIL, "H5Dget_space"); - - /* Select the correct chunk in the dataset */ - ret = H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Set the selection offset for the file dataspace */ - offset[0] = i - SPACE10_CHUNK_SIZE; - ret = H5Soffset_simple(fsid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Set the selection offset for the memory dataspace */ - offset[0] = SPACE10_DIM1 - i; - ret = H5Soffset_simple(msid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Write the data to the chunk */ - ret = H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the file dataspace copy */ - ret = H5Sclose(fsid); - CHECK(ret, FAIL, "H5Sclose"); - } - - /* Read the data back in */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the information read in */ - for (i = 0; i < SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) - for (j = 0; j < SPACE10_CHUNK_SIZE; j++) - if (wbuf[i + j] != rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j]) - TestErrPrintf("Line: %d - Error! i=%d, j=%d, rbuf=%d, wbuf=%d\n", __LINE__, i, j, - rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j], wbuf[i + j]); - - /* Check with 'OR'ed set of hyperslab selections, which makes certain the - * hyperslab spanlist code gets tested. -QAK - */ - - /* Re-initialize the write buffer */ - for (i = 0; i < SPACE10_DIM1; i++) - wbuf[i] = i * 2; - - /* Change the selected the region in the memory dataspace */ - start[0] = 0; - count[0] = SPACE10_CHUNK_SIZE / 3; - ret = H5Sselect_hyperslab(msid, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - start[0] = (2 * SPACE10_CHUNK_SIZE) / 3; - ret = H5Sselect_hyperslab(msid, H5S_SELECT_OR, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Loop over writing out each chunk */ - for (i = SPACE10_CHUNK_SIZE; i <= SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) { - hssize_t offset[1]; /* Offset of selection */ - hid_t fsid; /* File dataspace ID */ - hsize_t size[1]; /* The size to extend the dataset to */ - - /* Extend the dataset */ - size[0] = (hsize_t)i; /* The size to extend the dataset to */ - ret = H5Dset_extent(did, size); - CHECK(ret, FAIL, "H5Dset_extent"); - - /* Get the (extended) dataspace from the dataset */ - fsid = H5Dget_space(did); - CHECK(fsid, FAIL, "H5Dget_space"); - - /* Select the correct region in the dataset */ - start[0] = 0; - ret = H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - start[0] = (2 * SPACE10_CHUNK_SIZE) / 3; - ret = H5Sselect_hyperslab(fsid, H5S_SELECT_OR, start, NULL, count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Set the selection offset for the file dataspace */ - offset[0] = i - SPACE10_CHUNK_SIZE; - ret = H5Soffset_simple(fsid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Set the selection offset for the memory dataspace */ - offset[0] = SPACE10_DIM1 - i; - ret = H5Soffset_simple(msid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Write the data to the chunk */ - ret = H5Dwrite(did, H5T_NATIVE_INT, msid, 
fsid, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the file dataspace copy */ - ret = H5Sclose(fsid); - CHECK(ret, FAIL, "H5Sclose"); - } - - /* Read the data back in */ - ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify the information read in */ - for (i = 0; i < SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) - for (j = 0; j < SPACE10_CHUNK_SIZE; j++) - /* We're not writing out the "middle" of each chunk, so don't check that */ - if (j < (SPACE10_CHUNK_SIZE / 3) || j >= ((2 * SPACE10_CHUNK_SIZE) / 3)) - if (wbuf[i + j] != rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j]) - TestErrPrintf("Line: %d - Error! i=%d, j=%d, rbuf=%d, wbuf=%d\n", __LINE__, i, j, - rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j], wbuf[i + j]); - - /* Close the memory dataspace */ - ret = H5Sclose(msid); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close the dataset */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Free the buffers */ - free(wbuf); - free(rbuf); -} /* test_select_hyper_chunk_offset() */ -#endif -/**************************************************************** -** -** test_select_hyper_chunk_offset2(): Tests selections on dataspace, -** another test to verify that offsets for hyperslab selections are -** working in chunked datasets. -** -****************************************************************/ -#if 0 -static void -test_select_hyper_chunk_offset2(void) -{ - hid_t file, dataset; /* handles */ - hid_t dataspace; - hid_t memspace; - hid_t dcpl; /* Dataset creation property list */ - herr_t status; - unsigned data_out[SPACE12_DIM0]; /* output buffer */ - unsigned data_in[SPACE12_CHUNK_DIM0]; /* input buffer */ - hsize_t dims[SPACE12_RANK] = {SPACE12_DIM0}; /* Dimension size */ - hsize_t chunk_dims[SPACE12_RANK] = {SPACE12_CHUNK_DIM0}; /* Chunk size */ - hsize_t start[SPACE12_RANK]; /* Start of hyperslab */ - hsize_t count[SPACE12_RANK]; /* Size of hyperslab */ - hssize_t offset[SPACE12_RANK]; /* hyperslab offset in the file */ - unsigned u, v; /* Local index variables */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing more hyperslab selections using offsets in chunked datasets\n")); - - /* Initialize data to write out */ - for (u = 0; u < SPACE12_DIM0; u++) - data_out[u] = u; - - /* Create the file */ - file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fcreate"); - - /* Create dataspace */ - dataspace = H5Screate_simple(SPACE12_RANK, dims, NULL); - CHECK(dataspace, FAIL, "H5Screate_simple"); - - /* Create dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set chunk sizes */ - status = H5Pset_chunk(dcpl, SPACE12_RANK, chunk_dims); - CHECK(status, FAIL, "H5Pset_chunk"); - - /* Create dataset */ - dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UINT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close DCPL */ - status = H5Pclose(dcpl); - CHECK(status, FAIL, "H5Pclose"); - - /* Write out entire dataset */ - status = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_out); - CHECK(status, FAIL, "H5Dwrite"); - - /* Create memory dataspace (same size as a chunk) */ - memspace = H5Screate_simple(SPACE12_RANK, chunk_dims, NULL); - CHECK(memspace, FAIL, "H5Screate_simple"); - - /* - * Define hyperslab in the file 
dataspace. - */ - start[0] = 0; - count[0] = SPACE12_CHUNK_DIM0; - status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, NULL, count, NULL); - CHECK(status, FAIL, "H5Sselect_hyperslab"); - - /* Loop through retrieving data from file, checking it against data written */ - for (u = 0; u < SPACE12_DIM0; u += SPACE12_CHUNK_DIM0) { - /* Set the offset of the file selection */ - offset[0] = u; - status = H5Soffset_simple(dataspace, offset); - CHECK(status, FAIL, "H5Soffset_simple"); - - /* Read in buffer of data */ - status = H5Dread(dataset, H5T_NATIVE_UINT, memspace, dataspace, H5P_DEFAULT, data_in); - CHECK(status, FAIL, "H5Dread"); - - /* Check data read in */ - for (v = 0; v < SPACE12_CHUNK_DIM0; v++) - if (data_out[u + v] != data_in[v]) - TestErrPrintf("Error! data_out[%u]=%u, data_in[%u]=%u\n", (unsigned)(u + v), data_out[u + v], - v, data_in[v]); - } /* end for */ - - status = H5Dclose(dataset); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Sclose(dataspace); - CHECK(status, FAIL, "H5Sclose"); - - status = H5Sclose(memspace); - CHECK(status, FAIL, "H5Sclose"); - - status = H5Fclose(file); - CHECK(status, FAIL, "H5Fclose"); -} /* test_select_hyper_chunk_offset2() */ -#endif -/**************************************************************** -** -** test_select_bounds(): Tests selection bounds on dataspaces, -** both with and without offsets. -** -****************************************************************/ -static void -test_select_bounds(void) -{ - hid_t sid; /* Dataspace ID */ - const hsize_t dims[SPACE11_RANK] = {SPACE11_DIM1, SPACE11_DIM2}; /* Dataspace dimensions */ - hsize_t coord[SPACE11_NPOINTS][SPACE11_RANK]; /* Coordinates for point selection */ - hsize_t start[SPACE11_RANK]; /* The start of the hyperslab */ - hsize_t stride[SPACE11_RANK]; /* The stride between block starts for the hyperslab */ - hsize_t count[SPACE11_RANK]; /* The number of blocks for the hyperslab */ - hsize_t block[SPACE11_RANK]; /* The size of each block for the hyperslab */ - hssize_t offset[SPACE11_RANK]; /* Offset amount for selection */ - hsize_t low_bounds[SPACE11_RANK]; /* The low bounds for the selection */ - hsize_t high_bounds[SPACE11_RANK]; /* The high bounds for the selection */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing selection bounds\n")); - - /* Create dataspace */ - sid = H5Screate_simple(SPACE11_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Get bounds for 'all' selection */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 0, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], SPACE11_DIM1 - 1, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], SPACE11_DIM2 - 1, "H5Sget_select_bounds"); - - /* Set offset for selection */ - offset[0] = 1; - offset[1] = 1; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for 'all' selection with offset (which should be ignored) */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 0, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], SPACE11_DIM1 - 1, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], SPACE11_DIM2 - 1, "H5Sget_select_bounds"); - - /* Reset offset for selection */ - offset[0] = 0; - offset[1] = 0; - ret = H5Soffset_simple(sid, offset); - 
CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Set 'none' selection */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Get bounds for 'none' selection */ - H5E_BEGIN_TRY - { - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_select_bounds"); - - /* Set point selection */ - coord[0][0] = 3; - coord[0][1] = 3; - coord[1][0] = 3; - coord[1][1] = 96; - coord[2][0] = 96; - coord[2][1] = 3; - coord[3][0] = 96; - coord[3][1] = 96; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)SPACE11_NPOINTS, (const hsize_t *)coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Get bounds for point selection */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 3, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 3, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], SPACE11_DIM1 - 4, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], SPACE11_DIM2 - 4, "H5Sget_select_bounds"); - - /* Set bad offset for selection */ - offset[0] = 5; - offset[1] = -5; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for hyperslab selection with negative offset */ - H5E_BEGIN_TRY - { - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_select_bounds"); - - /* Set valid offset for selection */ - offset[0] = 2; - offset[1] = -2; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for point selection with offset */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 5, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 1, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], SPACE11_DIM1 - 2, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], SPACE11_DIM2 - 6, "H5Sget_select_bounds"); - - /* Reset offset for selection */ - offset[0] = 0; - offset[1] = 0; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Set "regular" hyperslab selection */ - start[0] = 2; - start[1] = 2; - stride[0] = 10; - stride[1] = 10; - count[0] = 4; - count[1] = 4; - block[0] = 5; - block[1] = 5; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Get bounds for hyperslab selection */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 2, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 2, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], 36, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], 36, "H5Sget_select_bounds"); - - /* Set bad offset for selection */ - offset[0] = 5; - offset[1] = -5; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for hyperslab selection with negative offset */ - H5E_BEGIN_TRY - { - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_select_bounds"); - - /* Set valid offset for selection */ - offset[0] = 5; - offset[1] = -2; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for hyperslab selection with offset */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 7, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); - 
VERIFY(high_bounds[0], 41, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], 34, "H5Sget_select_bounds"); - - /* Reset offset for selection */ - offset[0] = 0; - offset[1] = 0; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Make "irregular" hyperslab selection */ - start[0] = 20; - start[1] = 20; - stride[0] = 20; - stride[1] = 20; - count[0] = 2; - count[1] = 2; - block[0] = 10; - block[1] = 10; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Get bounds for hyperslab selection */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 2, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 2, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], 49, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], 49, "H5Sget_select_bounds"); - - /* Set bad offset for selection */ - offset[0] = 5; - offset[1] = -5; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for hyperslab selection with negative offset */ - H5E_BEGIN_TRY - { - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_select_bounds"); - - /* Set valid offset for selection */ - offset[0] = 5; - offset[1] = -2; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Get bounds for hyperslab selection with offset */ - ret = H5Sget_select_bounds(sid, low_bounds, high_bounds); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(low_bounds[0], 7, "H5Sget_select_bounds"); - VERIFY(low_bounds[1], 0, "H5Sget_select_bounds"); - VERIFY(high_bounds[0], 54, "H5Sget_select_bounds"); - VERIFY(high_bounds[1], 47, "H5Sget_select_bounds"); - - /* Reset offset for selection */ - offset[0] = 0; - offset[1] = 0; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_select_bounds() */ - -/**************************************************************** -** -** test_hyper_regular(): Tests query operations on regular hyperslabs -** -****************************************************************/ -static void -test_hyper_regular(void) -{ - hid_t sid; /* Dataspace ID */ - const hsize_t dims[SPACE13_RANK] = {SPACE13_DIM1, SPACE13_DIM2, SPACE13_DIM3}; /* Dataspace dimensions */ - hsize_t coord[SPACE13_NPOINTS][SPACE13_RANK]; /* Coordinates for point selection */ - hsize_t start[SPACE13_RANK]; /* The start of the hyperslab */ - hsize_t stride[SPACE13_RANK]; /* The stride between block starts for the hyperslab */ - hsize_t count[SPACE13_RANK]; /* The number of blocks for the hyperslab */ - hsize_t block[SPACE13_RANK]; /* The size of each block for the hyperslab */ - hsize_t t_start[SPACE13_RANK]; /* Temporary start of the hyperslab */ - hsize_t t_count[SPACE13_RANK]; /* Temporary number of blocks for the hyperslab */ - hsize_t q_start[SPACE13_RANK]; /* The queried start of the hyperslab */ - hsize_t q_stride[SPACE13_RANK]; /* The queried stride between block starts for the hyperslab */ - hsize_t q_count[SPACE13_RANK]; /* The queried number of blocks for the hyperslab */ - hsize_t q_block[SPACE13_RANK]; /* The queried size of each block for the hyperslab */ - htri_t is_regular; /* Whether a hyperslab selection is regular */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test 
being performed */ - MESSAGE(6, ("Testing queries on regular hyperslabs\n")); - - /* Create dataspace */ - sid = H5Screate_simple(SPACE13_RANK, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Query if 'all' selection is regular hyperslab (should fail) */ - H5E_BEGIN_TRY - { - is_regular = H5Sis_regular_hyperslab(sid); - } - H5E_END_TRY - VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab"); - - /* Query regular hyperslab selection info (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); - - /* Set 'none' selection */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Query if 'none' selection is regular hyperslab (should fail) */ - H5E_BEGIN_TRY - { - is_regular = H5Sis_regular_hyperslab(sid); - } - H5E_END_TRY - VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab"); - - /* Query regular hyperslab selection info (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); - - /* Set point selection */ - coord[0][0] = 3; - coord[0][1] = 3; - coord[0][2] = 3; - coord[1][0] = 3; - coord[1][1] = 48; - coord[1][2] = 48; - coord[2][0] = 48; - coord[2][1] = 3; - coord[2][2] = 3; - coord[3][0] = 48; - coord[3][1] = 48; - coord[3][2] = 48; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)SPACE13_NPOINTS, (const hsize_t *)coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Query if 'point' selection is regular hyperslab (should fail) */ - H5E_BEGIN_TRY - { - is_regular = H5Sis_regular_hyperslab(sid); - } - H5E_END_TRY - VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab"); - - /* Query regular hyperslab selection info (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); - - /* Set "regular" hyperslab selection */ - start[0] = 2; - start[1] = 2; - start[2] = 2; - stride[0] = 5; - stride[1] = 5; - stride[2] = 5; - count[0] = 3; - count[1] = 3; - count[2] = 3; - block[0] = 4; - block[1] = 4; - block[2] = 4; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Query if 'hyperslab' selection is regular hyperslab (should be true) */ - is_regular = H5Sis_regular_hyperslab(sid); - VERIFY(is_regular, true, "H5Sis_regular_hyperslab"); - - /* Retrieve the hyperslab parameters */ - ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); - CHECK(ret, FAIL, "H5Sget_regular_hyperslab"); - - /* Verify the hyperslab parameters */ - for (u = 0; u < SPACE13_RANK; u++) { - if (start[u] != q_start[u]) - ERROR("H5Sget_regular_hyperslab, start"); - if (stride[u] != q_stride[u]) - ERROR("H5Sget_regular_hyperslab, stride"); - if (count[u] != q_count[u]) - ERROR("H5Sget_regular_hyperslab, count"); - if (block[u] != q_block[u]) - ERROR("H5Sget_regular_hyperslab, block"); - } /* end for */ - - /* 'OR' in another point */ - t_start[0] = 0; - t_start[1] = 0; - t_start[2] = 0; - t_count[0] = 1; - t_count[1] = 1; - t_count[2] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, t_start, NULL, t_count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Query if 'hyperslab' selection is regular hyperslab (should be false) */ - is_regular = H5Sis_regular_hyperslab(sid); - VERIFY(is_regular, false, "H5Sis_regular_hyperslab"); - 
- /* Query regular hyperslab selection info (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Sget_regular_hyperslab"); - - /* 'XOR' in the point again, to remove it, which should make it regular again */ - t_start[0] = 0; - t_start[1] = 0; - t_start[2] = 0; - t_count[0] = 1; - t_count[1] = 1; - t_count[2] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, t_start, NULL, t_count, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Query if 'hyperslab' selection is regular hyperslab (should be true) */ - is_regular = H5Sis_regular_hyperslab(sid); - VERIFY(is_regular, true, "H5Sis_regular_hyperslab"); - - /* Retrieve the hyperslab parameters */ - ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block); - CHECK(ret, FAIL, "H5Sget_regular_hyperslab"); - - /* Verify the hyperslab parameters */ - for (u = 0; u < SPACE13_RANK; u++) { - if (start[u] != q_start[u]) - ERROR("H5Sget_regular_hyperslab, start"); - if (stride[u] != q_stride[u]) - ERROR("H5Sget_regular_hyperslab, stride"); - if (count[u] != q_count[u]) - ERROR("H5Sget_regular_hyperslab, count"); - if (block[u] != q_block[u]) - ERROR("H5Sget_regular_hyperslab, block"); - } /* end for */ - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_hyper_regular() */ - -/**************************************************************** -** -** test_hyper_unlim(): Tests unlimited hyperslab selections -** -****************************************************************/ -static void -test_hyper_unlim_check(hid_t sid, hsize_t *dims, hssize_t endpoints, hssize_t enblocks, hsize_t *eblock1, - hsize_t *eblock2) -{ - hid_t lim_sid; - hsize_t start[3]; - H5S_sel_type sel_type; - hssize_t npoints; - hssize_t nblocks; - hsize_t blocklist[12]; - herr_t ret; - - assert(enblocks <= 2); - - /* Copy sid to lim_sid */ - lim_sid = H5Scopy(sid); - CHECK(lim_sid, FAIL, "H5Scopy"); - - /* "And" lim_sid with dims to create limited selection */ - memset(start, 0, sizeof(start)); - ret = H5Sselect_hyperslab(lim_sid, H5S_SELECT_AND, start, NULL, dims, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check number of elements */ - npoints = H5Sget_select_npoints(lim_sid); - CHECK(npoints, FAIL, "H5Sget_select_npoints"); - VERIFY(npoints, endpoints, "H5Sget_select_npoints"); - - /* Get selection type */ - sel_type = H5Sget_select_type(lim_sid); - CHECK(sel_type, H5S_SEL_ERROR, "H5Sget_select_type"); - - /* Only examine blocks for hyperslab selection */ - if (sel_type == H5S_SEL_HYPERSLABS) { - /* Get number of blocks */ - nblocks = H5Sget_select_hyper_nblocks(lim_sid); - CHECK(nblocks, FAIL, "H5Sget_select_hyper_nblocks"); - VERIFY(nblocks, enblocks, "H5Sget_select_hyper_nblocks"); - - if (nblocks > 0) { - /* Get blocklist */ - ret = H5Sget_select_hyper_blocklist(lim_sid, (hsize_t)0, (hsize_t)nblocks, blocklist); - CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist"); - - /* Verify blocklist */ - if (nblocks == (hssize_t)1) { - if (memcmp(blocklist, eblock1, 6 * sizeof(eblock1[0])) != 0) - ERROR("H5Sget_select_hyper_blocklist"); - } /* end if */ - else { - assert(nblocks == (hssize_t)2); - if (memcmp(blocklist, eblock1, 6 * sizeof(eblock1[0])) != 0) { - if (memcmp(blocklist, eblock2, 6 * sizeof(eblock2[0])) != 0) - ERROR("H5Sget_select_hyper_blocklist"); - if (memcmp(&blocklist[6], eblock1, 6 * sizeof(eblock1[0])) != 0) - ERROR("H5Sget_select_hyper_blocklist"); - } /* end if */ - else if 
(memcmp(&blocklist[6], eblock2, 6 * sizeof(eblock2[0])) != 0) - ERROR("H5Sget_select_hyper_blocklist"); - } /* end else */ - } /* end if */ - } /* end if */ - else if (sel_type != H5S_SEL_NONE) - ERROR("H5Sget_select_type"); - - /* Close the limited dataspace */ - ret = H5Sclose(lim_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* end test_hyper_unlim_check() */ - -static void -test_hyper_unlim(void) -{ - hid_t sid; - hsize_t dims[3] = {4, 4, 7}; - hsize_t mdims[3] = {4, H5S_UNLIMITED, 7}; - hsize_t start[3] = {1, 2, 1}; - hsize_t stride[3] = {1, 1, 3}; - hsize_t count[3] = {1, 1, 2}; - hsize_t block[3] = {2, H5S_UNLIMITED, 2}; - hsize_t start2[3]; - hsize_t count2[3]; - hsize_t eblock1[6] = {1, 2, 1, 2, 3, 2}; - hsize_t eblock2[6] = {1, 2, 4, 2, 3, 5}; - hssize_t offset[3] = {0, -1, 0}; - hssize_t ssize_out; - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(6, ("Testing unlimited hyperslab selections\n")); - - /* Create dataspace */ - sid = H5Screate_simple(3, dims, mdims); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Select unlimited hyperslab */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check with unlimited dimension clipped to 4 */ - test_hyper_unlim_check(sid, dims, (hssize_t)16, (hssize_t)2, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 3 */ - dims[1] = 3; - eblock1[4] = 2; - eblock2[4] = 2; - test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)2, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 2 */ - dims[1] = 2; - test_hyper_unlim_check(sid, dims, (hssize_t)0, (hssize_t)0, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 1 */ - dims[1] = 1; - test_hyper_unlim_check(sid, dims, (hssize_t)0, (hssize_t)0, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 7 */ - dims[1] = 7; - eblock1[4] = 6; - eblock2[4] = 6; - test_hyper_unlim_check(sid, dims, (hssize_t)40, (hssize_t)2, eblock1, eblock2); - - /* Set offset of selection */ - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Check with adjusted offset (should not affect result) */ - test_hyper_unlim_check(sid, dims, (hssize_t)40, (hssize_t)2, eblock1, eblock2); - - /* Reset offset of selection */ - offset[1] = (hssize_t)0; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* - * Now try with multiple blocks in unlimited dimension - */ - stride[1] = 3; - stride[2] = 1; - count[1] = H5S_UNLIMITED; - count[2] = 1; - block[1] = 2; - - /* Select unlimited hyperslab */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Check with new selection */ - eblock1[1] = 2; - eblock1[4] = 3; - eblock2[1] = 5; - eblock2[2] = 1; - eblock2[4] = 6; - eblock2[5] = 2; - test_hyper_unlim_check(sid, dims, (hssize_t)16, (hssize_t)2, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 3 */ - dims[1] = 3; - eblock1[4] = 2; - test_hyper_unlim_check(sid, dims, (hssize_t)4, (hssize_t)1, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 4 */ - dims[1] = 4; - eblock1[4] = 3; - test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)1, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 5 */ - dims[1] = 5; - eblock1[4] = 3; - test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)1, eblock1, eblock2); - - /* Check with unlimited dimension clipped to 6 */ - dims[1] = 6; - eblock1[4] = 
3; - eblock2[4] = 5; - test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); - - /* Set offset of selection */ - offset[1] = (hssize_t)-1; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Check with adjusted offset (should not affect result) */ - test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); - - /* Set offset of selection */ - offset[1] = (hssize_t)3; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* Check with adjusted offset (should not affect result) */ - test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); - - /* Reset offset of selection */ - offset[1] = (hssize_t)0; - ret = H5Soffset_simple(sid, offset); - CHECK(ret, FAIL, "H5Soffset_simple"); - - /* - * Now try invalid operations - */ - H5E_BEGIN_TRY - { - /* Try multiple unlimited dimensions */ - start[0] = 1; - start[1] = 2; - start[2] = 1; - stride[0] = 1; - stride[1] = 3; - stride[2] = 3; - count[0] = 1; - count[1] = H5S_UNLIMITED; - count[2] = H5S_UNLIMITED; - block[0] = 2; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - - /* Try unlimited count and block */ - count[2] = 2; - block[1] = H5S_UNLIMITED; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - } - H5E_END_TRY - - /* Try operations with two unlimited selections */ - block[1] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - H5E_BEGIN_TRY - { - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, count, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, NULL, count, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, count, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, count, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start, NULL, count, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - } - H5E_END_TRY - - /* Try invalid combination operations */ - H5E_BEGIN_TRY - { - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, block, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, block, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, block, NULL); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - } - H5E_END_TRY - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, block, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - H5E_BEGIN_TRY - { - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, stride, count, block); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start, stride, count, block); - VERIFY(ret, FAIL, "H5Sselect_hyperslab"); - } - H5E_END_TRY - - /* - * Now test valid combination operations - */ - /* unlim AND non-unlim */ - count[0] = 1; - count[1] = H5S_UNLIMITED; - count[2] = 2; - block[0] = 2; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, 
block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - start2[0] = 2; - start2[1] = 2; - start2[2] = 0; - count2[0] = 5; - count2[1] = 4; - count2[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start2, NULL, count2, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - eblock1[0] = 2; - eblock1[3] = 2; - eblock1[1] = 2; - eblock1[4] = 3; - eblock1[2] = 1; - eblock1[5] = 1; - eblock2[0] = 2; - eblock2[3] = 2; - eblock2[1] = 5; - eblock2[4] = 5; - eblock2[2] = 1; - eblock2[5] = 1; - dims[0] = 50; - dims[1] = 50; - dims[2] = 50; - test_hyper_unlim_check(sid, dims, (hssize_t)3, (hssize_t)2, eblock1, eblock2); - - /* unlim NOTA non-unlim */ - count[0] = 1; - count[1] = H5S_UNLIMITED; - count[2] = 2; - block[0] = 2; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - start2[0] = 1; - start2[1] = 5; - start2[2] = 2; - count2[0] = 2; - count2[1] = 2; - count2[2] = 6; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start2, NULL, count2, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - eblock1[0] = 1; - eblock1[3] = 2; - eblock1[1] = 5; - eblock1[4] = 6; - eblock1[2] = 3; - eblock1[5] = 3; - eblock2[0] = 1; - eblock2[3] = 2; - eblock2[1] = 5; - eblock2[4] = 6; - eblock2[2] = 6; - eblock2[5] = 7; - dims[0] = 50; - dims[1] = 50; - dims[2] = 50; - test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); - - /* non-unlim AND unlim */ - start2[0] = 2; - start2[1] = 2; - start2[2] = 0; - count2[0] = 5; - count2[1] = 4; - count2[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start2, NULL, count2, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - count[0] = 1; - count[1] = H5S_UNLIMITED; - count[2] = 2; - block[0] = 2; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - eblock1[0] = 2; - eblock1[3] = 2; - eblock1[1] = 2; - eblock1[4] = 3; - eblock1[2] = 1; - eblock1[5] = 1; - eblock2[0] = 2; - eblock2[3] = 2; - eblock2[1] = 5; - eblock2[4] = 5; - eblock2[2] = 1; - eblock2[5] = 1; - dims[0] = 50; - dims[1] = 50; - dims[2] = 50; - test_hyper_unlim_check(sid, dims, (hssize_t)3, (hssize_t)2, eblock1, eblock2); - - /* non-unlim NOTB unlim */ - start2[0] = 1; - start2[1] = 5; - start2[2] = 2; - count2[0] = 2; - count2[1] = 2; - count2[2] = 6; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start2, NULL, count2, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - count[0] = 1; - count[1] = H5S_UNLIMITED; - count[2] = 2; - block[0] = 2; - block[1] = 2; - block[2] = 2; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - eblock1[0] = 1; - eblock1[3] = 2; - eblock1[1] = 5; - eblock1[4] = 6; - eblock1[2] = 3; - eblock1[5] = 3; - eblock2[0] = 1; - eblock2[3] = 2; - eblock2[1] = 5; - eblock2[4] = 6; - eblock2[2] = 6; - eblock2[5] = 7; - dims[0] = 50; - dims[1] = 50; - dims[2] = 50; - test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2); - - /* Test H5Sget_select_npoints() */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - ssize_out = H5Sget_select_npoints(sid); - VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints"); - - /* Test H5Sget_select_hyper_nblocks() */ - H5E_BEGIN_TRY - { - ssize_out = H5Sget_select_hyper_nblocks(sid); - } - H5E_END_TRY - VERIFY(ssize_out, 
(hssize_t)H5S_UNLIMITED, "H5Sget_select_hyper_nblocks"); - - /* Test H5Sget_select_bounds() */ - ret = H5Sget_select_bounds(sid, start2, count2); - CHECK(ret, FAIL, "H5Sget_select_bounds"); - VERIFY(start2[0], start[0], "H5Sget_select_bounds"); - VERIFY(start2[1], start[1], "H5Sget_select_bounds"); - VERIFY(start2[2], start[2], "H5Sget_select_bounds"); - VERIFY(count2[0], (long)(start[0] + (stride[0] * (count[0] - 1)) + block[0] - 1), "H5Sget_select_bounds"); - VERIFY(count2[1], H5S_UNLIMITED, "H5Sget_select_bounds"); - VERIFY(count2[2], (long)(start[2] + (stride[2] * (count[2] - 1)) + block[2] - 1), "H5Sget_select_bounds"); - - /* Close the dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* end test_hyper_unlim() */ - -/**************************************************************** -** -** test_internal_consistency(): Tests selections on dataspace, then -** verify that internal states of data structures of selections are -** consistent. -** -****************************************************************/ -static void -test_internal_consistency(void) -{ - hid_t all_sid; /* Dataspace ID with "all" selection */ - hid_t none_sid; /* Dataspace ID with "none" selection */ - hid_t single_pt_sid; /* Dataspace ID with single point selection */ - hid_t mult_pt_sid; /* Dataspace ID with multiple point selection */ - hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */ - hid_t single_hyper_all_sid; /* Dataspace ID with single block hyperslab - * selection that is the entire dataspace - */ - hid_t single_hyper_pt_sid; /* Dataspace ID with single block hyperslab - * selection that is the same as the single - * point selection - */ - hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */ - hid_t irreg_hyper_sid; /* Dataspace ID with irregular hyperslab selection */ - hid_t none_hyper_sid; /* Dataspace ID with "no hyperslabs" selection */ - hid_t scalar_all_sid; /* ID for scalar dataspace with "all" selection */ - hid_t scalar_none_sid; /* ID for scalar dataspace with "none" selection */ - hid_t tmp_sid; /* Temporary dataspace ID */ - hsize_t dims[] = {SPACE9_DIM1, SPACE9_DIM2}; - hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */ - hsize_t coord2[SPACE9_DIM2][SPACE9_RANK]; /* Coordinates for multiple point selection */ - hsize_t start[SPACE9_RANK]; /* Hyperslab start */ - hsize_t stride[SPACE9_RANK]; /* Hyperslab stride */ - hsize_t count[SPACE9_RANK]; /* Hyperslab block count */ - hsize_t block[SPACE9_RANK]; /* Hyperslab block size */ -#if 0 - htri_t check; /* Shape comparison return value */ -#endif - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Consistency of Internal States\n")); - assert(SPACE9_DIM2 >= POINT1_NPOINTS); - - /* Create dataspace for "all" selection */ - all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(all_sid, FAIL, "H5Screate_simple"); - - /* Select entire extent for dataspace */ - ret = H5Sselect_all(all_sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Create dataspace for "none" selection */ - none_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(none_sid, FAIL, "H5Screate_simple"); - - /* Un-Select entire extent for dataspace */ - ret = H5Sselect_none(none_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Create dataspace for single point selection */ - single_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_pt_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of ten 
points for multiple point selection */ - coord1[0][0] = 2; - coord1[0][1] = 2; - ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create dataspace for multiple point selection */ - mult_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(mult_pt_sid, FAIL, "H5Screate_simple"); - - /* Select sequence of ten points for multiple point selection */ - coord2[0][0] = 2; - coord2[0][1] = 2; - coord2[1][0] = 7; - coord2[1][1] = 2; - coord2[2][0] = 1; - coord2[2][1] = 4; - coord2[3][0] = 2; - coord2[3][1] = 6; - coord2[4][0] = 0; - coord2[4][1] = 8; - coord2[5][0] = 3; - coord2[5][1] = 2; - coord2[6][0] = 4; - coord2[6][1] = 4; - coord2[7][0] = 1; - coord2[7][1] = 0; - coord2[8][0] = 5; - coord2[8][1] = 1; - coord2[9][0] = 9; - coord2[9][1] = 3; - ret = H5Sselect_elements(mult_pt_sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create dataspace for single hyperslab selection */ - single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_hyper_sid, FAIL, "H5Screate_simple"); - - /* Select 10x10 hyperslab for single hyperslab selection */ - start[0] = 1; - start[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = (SPACE9_DIM1 - 2); - block[1] = (SPACE9_DIM2 - 2); - ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for single hyperslab selection with entire extent selected */ - single_hyper_all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_hyper_all_sid, FAIL, "H5Screate_simple"); - - /* Select entire extent for hyperslab selection */ - start[0] = 0; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = SPACE9_DIM1; - block[1] = SPACE9_DIM2; - ret = H5Sselect_hyperslab(single_hyper_all_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for single hyperslab selection with single point selected */ - single_hyper_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(single_hyper_pt_sid, FAIL, "H5Screate_simple"); - - /* Select entire extent for hyperslab selection */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(single_hyper_pt_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for regular hyperslab selection */ - regular_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(regular_hyper_sid, FAIL, "H5Screate_simple"); - - /* Select regular, strided hyperslab selection */ - start[0] = 2; - start[1] = 2; - stride[0] = 2; - stride[1] = 2; - count[0] = 5; - count[1] = 2; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for irregular hyperslab selection */ - irreg_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(irreg_hyper_sid, FAIL, "H5Screate_simple"); - - /* Create irregular hyperslab selection by OR'ing two blocks together */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(irreg_hyper_sid, 
H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 4; - start[1] = 4; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 3; - block[1] = 3; - ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for "no" hyperslab selection */ - none_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL); - CHECK(none_hyper_sid, FAIL, "H5Screate_simple"); - - /* Create "no" hyperslab selection by XOR'ing same blocks together */ - start[0] = 2; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 1; - count[1] = 1; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_XOR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create scalar dataspace for "all" selection */ - scalar_all_sid = H5Screate(H5S_SCALAR); - CHECK(scalar_all_sid, FAIL, "H5Screate"); - - /* Create scalar dataspace for "none" selection */ - scalar_none_sid = H5Screate(H5S_SCALAR); - CHECK(scalar_none_sid, FAIL, "H5Screate"); - - /* Un-Select entire extent for dataspace */ - ret = H5Sselect_none(scalar_none_sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Test all the selections created */ - - /* Test the copy of itself */ - tmp_sid = H5Scopy(all_sid); - CHECK(tmp_sid, FAIL, "H5Scopy"); -#if 0 - check = H5S__internal_consistency_test(tmp_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); -#endif - ret = H5Sclose(tmp_sid); - CHECK(ret, FAIL, "H5Sclose"); -#if 0 - /* Test "none" selection */ - check = H5S__internal_consistency_test(none_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test single point selection */ - check = H5S__internal_consistency_test(single_pt_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test multiple point selection */ - check = H5S__internal_consistency_test(mult_pt_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test "plain" single hyperslab selection */ - check = H5S__internal_consistency_test(single_hyper_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test "all" single hyperslab selection */ - check = H5S__internal_consistency_test(single_hyper_all_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test "single point" single hyperslab selection */ - check = H5S__internal_consistency_test(single_hyper_pt_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test regular, strided hyperslab selection */ - check = H5S__internal_consistency_test(regular_hyper_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test irregular hyperslab selection */ - check = H5S__internal_consistency_test(irreg_hyper_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test "no" hyperslab selection */ - check = H5S__internal_consistency_test(none_hyper_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test scalar "all" hyperslab selection */ - check = H5S__internal_consistency_test(scalar_all_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); - - /* Test scalar "none" hyperslab selection */ - check = H5S__internal_consistency_test(scalar_none_sid); - VERIFY(check, true, "H5S__internal_consistency_test"); -#endif - - /* Close dataspaces */ - ret = 
H5Sclose(all_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(none_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(mult_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_all_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(single_hyper_pt_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(regular_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(irreg_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(none_hyper_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(scalar_all_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(scalar_none_sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_internal_consistency() */ - -/**************************************************************** -** -** test_irreg_io(): Tests unusual selections on datasets, to stress the -** new hyperslab code. -** -****************************************************************/ -static void -test_irreg_io(void) -{ - hid_t fid; /* File ID */ - hid_t did; /* Dataset ID */ - hid_t dcpl_id; /* Dataset creation property list ID */ - hid_t sid; /* File dataspace ID */ - hid_t mem_sid; /* Memory dataspace ID */ - hsize_t dims[] = {6, 12}; /* Dataspace dimensions */ - hsize_t chunk_dims[] = {2, 2}; /* Chunk dimensions */ - hsize_t mem_dims[] = {32}; /* Memory dataspace dimensions */ - hsize_t start[2]; /* Hyperslab start */ - hsize_t stride[2]; /* Hyperslab stride */ - hsize_t count[2]; /* Hyperslab block count */ - hsize_t block[2]; /* Hyperslab block size */ - unsigned char wbuf[72]; /* Write buffer */ - unsigned char rbuf[32]; /* Read buffer */ - unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Irregular Hyperslab I/O\n")); - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create dataspace for dataset */ - sid = H5Screate_simple(2, dims, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Set chunk dimensions for dataset */ - dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl_id, FAIL, "H5Pcreate"); - ret = H5Pset_chunk(dcpl_id, 2, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create a dataset */ - did = H5Dcreate2(fid, SPACE1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dcreate2"); - - /* Initialize the write buffer */ - for (u = 0; u < 72; u++) - wbuf[u] = (unsigned char)u; - - /* Write entire dataset to disk */ - ret = H5Dwrite(did, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close the DCPL */ - ret = H5Pclose(dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Create dataspace for memory selection */ - mem_sid = H5Screate_simple(1, mem_dims, NULL); - CHECK(mem_sid, FAIL, "H5Screate_simple"); - - /* Select 'L'-shaped region within dataset */ - start[0] = 0; - start[1] = 10; - stride[0] = 1; - stride[1] = 1; - count[0] = 4; - count[1] = 2; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 4; - start[1] = 0; - stride[0] = 1; - stride[1] = 1; - count[0] = 2; - count[1] = 12; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, 
"H5Sselect_hyperslab"); - - /* Reset the buffer */ - memset(rbuf, 0, sizeof(rbuf)); - - /* Read selection from disk */ - ret = H5Dread(did, H5T_NATIVE_UCHAR, mem_sid, sid, H5P_DEFAULT, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Close everything */ - ret = H5Sclose(mem_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); -} /* test_irreg_io() */ - -/**************************************************************** -** -** test_sel_iter(): Test selection iterator API routines. -** -****************************************************************/ -static void -test_sel_iter(void) -{ - hid_t sid; /* Dataspace ID */ - hid_t iter_id; /* Dataspace selection iterator ID */ - hsize_t dims1[] = {6, 12}; /* 2-D Dataspace dimensions */ - hsize_t coord1[POINT1_NPOINTS][2]; /* Coordinates for point selection */ - hsize_t start[2]; /* Hyperslab start */ - hsize_t stride[2]; /* Hyperslab stride */ - hsize_t count[2]; /* Hyperslab block count */ - hsize_t block[2]; /* Hyperslab block size */ - size_t nseq; /* # of sequences retrieved */ - size_t nbytes; /* # of bytes retrieved */ - hsize_t off[SEL_ITER_MAX_SEQ]; /* Offsets for retrieved sequences */ - size_t len[SEL_ITER_MAX_SEQ]; /* Lengths for retrieved sequences */ - H5S_sel_type sel_type; /* Selection type */ - unsigned sel_share; /* Whether to share selection with dataspace */ - unsigned sel_iter_flags; /* Flags for selection iterator creation */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Dataspace Selection Iterators\n")); - - /* Create dataspace */ - sid = H5Screate_simple(2, dims1, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Try creating selection iterator object with bad parameters */ - H5E_BEGIN_TRY - { /* Bad dataspace ID */ - iter_id = H5Ssel_iter_create(H5I_INVALID_HID, (size_t)1, (unsigned)0); - } - H5E_END_TRY - VERIFY(iter_id, FAIL, "H5Ssel_iter_create"); - H5E_BEGIN_TRY - { /* Bad element size */ - iter_id = H5Ssel_iter_create(sid, (size_t)0, (unsigned)0); - } - H5E_END_TRY - VERIFY(iter_id, FAIL, "H5Ssel_iter_create"); - H5E_BEGIN_TRY - { /* Bad flag(s) */ - iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)0xffff); - } - H5E_END_TRY - VERIFY(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Try closing selection iterator, with bad parameters */ - H5E_BEGIN_TRY - { /* Invalid ID */ - ret = H5Ssel_iter_close(H5I_INVALID_HID); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_close"); - H5E_BEGIN_TRY - { /* Not a selection iterator ID */ - ret = H5Ssel_iter_close(sid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_close"); - - /* Try with no selection sharing, and with sharing */ - for (sel_share = 0; sel_share < 2; sel_share++) { - /* Set selection iterator sharing flags */ - if (sel_share) - sel_iter_flags = H5S_SEL_ITER_SHARE_WITH_DATASPACE; - else - sel_iter_flags = 0; - - /* Create selection iterator object */ - iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); - CHECK(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Close selection iterator */ - ret = H5Ssel_iter_close(iter_id); - CHECK(ret, FAIL, "H5Ssel_iter_close"); - - /* Try closing selection iterator twice */ - H5E_BEGIN_TRY - { /* Invalid ID */ - ret = H5Ssel_iter_close(iter_id); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_close"); - - /* Create selection iterator object */ - iter_id = H5Ssel_iter_create(sid, 
(size_t)1, (unsigned)sel_iter_flags); - CHECK(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Try resetting selection iterator with bad parameters */ - H5E_BEGIN_TRY - { - ret = H5Ssel_iter_reset(H5I_INVALID_HID, sid); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_reset"); - H5E_BEGIN_TRY - { - ret = H5Ssel_iter_reset(iter_id, H5I_INVALID_HID); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_reset"); - - /* Try retrieving sequences, with bad parameters */ - H5E_BEGIN_TRY - { /* Invalid ID */ - ret = H5Ssel_iter_get_seq_list(H5I_INVALID_HID, (size_t)1, (size_t)1, &nseq, &nbytes, off, len); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); - H5E_BEGIN_TRY - { /* Invalid nseq pointer */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, NULL, &nbytes, off, len); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); - H5E_BEGIN_TRY - { /* Invalid nbytes pointer */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, NULL, off, len); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); - H5E_BEGIN_TRY - { /* Invalid offset array */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, &nbytes, NULL, len); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); - H5E_BEGIN_TRY - { /* Invalid length array */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, &nbytes, off, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list"); - - /* Close selection iterator */ - ret = H5Ssel_iter_close(iter_id); - CHECK(ret, FAIL, "H5Ssel_iter_close"); - - /* Test iterators on various basic selection types */ - for (sel_type = H5S_SEL_NONE; sel_type <= H5S_SEL_ALL; sel_type = (H5S_sel_type)(sel_type + 1)) { - switch (sel_type) { - case H5S_SEL_NONE: /* "None" selection */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - break; - - case H5S_SEL_POINTS: /* Point selection */ - /* Select sequence of ten points */ - coord1[0][0] = 0; - coord1[0][1] = 9; - coord1[1][0] = 1; - coord1[1][1] = 2; - coord1[2][0] = 2; - coord1[2][1] = 4; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[5][0] = 2; - coord1[5][1] = 10; - coord1[6][0] = 0; - coord1[6][1] = 11; - coord1[7][0] = 1; - coord1[7][1] = 4; - coord1[8][0] = 2; - coord1[8][1] = 1; - coord1[9][0] = 0; - coord1[9][1] = 3; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, - (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - break; - - case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ - /* Select regular hyperslab */ - start[0] = 3; - start[1] = 0; - stride[0] = 2; - stride[1] = 2; - count[0] = 2; - count[1] = 5; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - break; - - case H5S_SEL_ALL: /* "All" selection */ - ret = H5Sselect_all(sid); - CHECK(ret, FAIL, "H5Sselect_all"); - break; - - case H5S_SEL_ERROR: - case H5S_SEL_N: - default: - assert(0 && "Can't occur"); - break; - } /* end switch */ - - /* Create selection iterator object */ - iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); - CHECK(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Try retrieving no sequences, with 0 for maxseq & maxbytes */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)0, (size_t)1, &nseq, &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, 
"H5Ssel_iter_get_seq_list"); - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)0, &nseq, &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); - - /* Try retrieving all sequences */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - - /* Check results from retrieving sequence list */ - switch (sel_type) { - case H5S_SEL_NONE: /* "None" selection */ - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_POINTS: /* Point selection */ - VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ - VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_ALL: /* "All" selection */ - VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_ERROR: - case H5S_SEL_N: - default: - assert(0 && "Can't occur"); - break; - } /* end switch */ - - /* Close selection iterator */ - ret = H5Ssel_iter_close(iter_id); - CHECK(ret, FAIL, "H5Ssel_iter_close"); - } /* end for */ - - /* Create selection iterator object */ - iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); - CHECK(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Test iterators on various basic selection types using - * H5Ssel_iter_reset instead of creating multiple iterators */ - for (sel_type = H5S_SEL_NONE; sel_type <= H5S_SEL_ALL; sel_type = (H5S_sel_type)(sel_type + 1)) { - switch (sel_type) { - case H5S_SEL_NONE: /* "None" selection */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - break; - - case H5S_SEL_POINTS: /* Point selection */ - /* Select sequence of ten points */ - coord1[0][0] = 0; - coord1[0][1] = 9; - coord1[1][0] = 1; - coord1[1][1] = 2; - coord1[2][0] = 2; - coord1[2][1] = 4; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[5][0] = 2; - coord1[5][1] = 10; - coord1[6][0] = 0; - coord1[6][1] = 11; - coord1[7][0] = 1; - coord1[7][1] = 4; - coord1[8][0] = 2; - coord1[8][1] = 1; - coord1[9][0] = 0; - coord1[9][1] = 3; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, - (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - break; - - case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ - /* Select regular hyperslab */ - start[0] = 3; - start[1] = 0; - stride[0] = 2; - stride[1] = 2; - count[0] = 2; - count[1] = 5; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - break; - - case H5S_SEL_ALL: /* "All" selection */ - ret = H5Sselect_all(sid); - CHECK(ret, FAIL, "H5Sselect_all"); - break; - - case H5S_SEL_ERROR: - case H5S_SEL_N: - default: - assert(0 && "Can't occur"); - break; - } /* end switch */ - - /* Try retrieving no sequences, with 0 for maxseq & maxbytes */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)0, (size_t)1, &nseq, &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)0, &nseq, &nbytes, off, len); - 
CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); - - /* Reset iterator */ - ret = H5Ssel_iter_reset(iter_id, sid); - CHECK(ret, FAIL, "H5Ssel_iter_reset"); - - /* Try retrieving all sequences */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - - /* Check results from retrieving sequence list */ - switch (sel_type) { - case H5S_SEL_NONE: /* "None" selection */ - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_POINTS: /* Point selection */ - VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ - VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_ALL: /* "All" selection */ - VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_ERROR: - case H5S_SEL_N: - default: - assert(0 && "Can't occur"); - break; - } /* end switch */ - - /* Reset iterator */ - ret = H5Ssel_iter_reset(iter_id, sid); - CHECK(ret, FAIL, "H5Ssel_iter_reset"); - - /* Try retrieving all sequences again */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - - /* Check results from retrieving sequence list */ - switch (sel_type) { - case H5S_SEL_NONE: /* "None" selection */ - VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_POINTS: /* Point selection */ - VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_HYPERSLABS: /* Hyperslab selection */ - VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_ALL: /* "All" selection */ - VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list"); - break; - - case H5S_SEL_ERROR: - case H5S_SEL_N: - default: - assert(0 && "Can't occur"); - break; - } /* end switch */ - - /* Reset iterator */ - ret = H5Ssel_iter_reset(iter_id, sid); - CHECK(ret, FAIL, "H5Ssel_iter_reset"); - } /* end for */ - - /* Close selection iterator */ - ret = H5Ssel_iter_close(iter_id); - CHECK(ret, FAIL, "H5Ssel_iter_close"); - - /* Point selection which will merge into smaller # of sequences */ - coord1[0][0] = 0; - coord1[0][1] = 9; - coord1[1][0] = 0; - coord1[1][1] = 10; - coord1[2][0] = 0; - coord1[2][1] = 11; - coord1[3][0] = 0; - coord1[3][1] = 6; - coord1[4][0] = 1; - coord1[4][1] = 8; - coord1[5][0] = 2; - coord1[5][1] = 10; - coord1[6][0] = 0; - coord1[6][1] = 11; - coord1[7][0] = 1; - coord1[7][1] = 4; - coord1[8][0] = 1; - coord1[8][1] = 5; - coord1[9][0] = 1; - coord1[9][1] = 6; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Create selection iterator object */ - iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); - CHECK(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Try retrieving all sequences */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, 
len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - - /* Reset iterator */ - ret = H5Ssel_iter_reset(iter_id, sid); - CHECK(ret, FAIL, "H5Ssel_iter_reset"); - - /* Try retrieving all sequences again */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list"); - - /* Close selection iterator */ - ret = H5Ssel_iter_close(iter_id); - CHECK(ret, FAIL, "H5Ssel_iter_close"); - - /* Select irregular hyperslab, which will merge into smaller # of sequences */ - start[0] = 3; - start[1] = 0; - stride[0] = 2; - stride[1] = 2; - count[0] = 2; - count[1] = 5; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - start[0] = 3; - start[1] = 3; - stride[0] = 2; - stride[1] = 2; - count[0] = 2; - count[1] = 5; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create selection iterator object */ - iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags); - CHECK(iter_id, FAIL, "H5Ssel_iter_create"); - - /* Try retrieving all sequences */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 20, "H5Ssel_iter_get_seq_list"); - - /* Reset iterator */ - ret = H5Ssel_iter_reset(iter_id, sid); - CHECK(ret, FAIL, "H5Ssel_iter_reset"); - - /* Try retrieving all sequences again */ - ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq, - &nbytes, off, len); - CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list"); - VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list"); - VERIFY(nbytes, 20, "H5Ssel_iter_get_seq_list"); - - /* Close selection iterator */ - ret = H5Ssel_iter_close(iter_id); - CHECK(ret, FAIL, "H5Ssel_iter_close"); - - } /* end for */ - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_sel_iter() */ - -/**************************************************************** -** -** test_select_intersect_block(): Test selections on dataspace, -** verify that "intersect block" routine is working correctly. 
-** -****************************************************************/ -static void -test_select_intersect_block(void) -{ - hid_t sid; /* Dataspace ID */ - hsize_t dims1[] = {6, 12}; /* 2-D Dataspace dimensions */ - hsize_t block_start[] = {1, 3}; /* Start offset for block */ - hsize_t block_end[] = {2, 5}; /* End offset for block */ - hsize_t block_end2[] = {0, 5}; /* Bad end offset for block */ - hsize_t block_end3[] = {2, 2}; /* Another bad end offset for block */ - hsize_t block_end4[] = {1, 3}; /* End offset that makes a single element block */ - hsize_t coord[10][2]; /* Coordinates for point selection */ - hsize_t start[2]; /* Starting location of hyperslab */ - hsize_t stride[2]; /* Stride of hyperslab */ - hsize_t count[2]; /* Element count of hyperslab */ - hsize_t block[2]; /* Block size of hyperslab */ - htri_t status; /* Intersection status */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Dataspace Selection Block Intersection\n")); - - /* Create dataspace */ - sid = H5Screate_simple(2, dims1, NULL); - CHECK(sid, FAIL, "H5Screate_simple"); - - /* Try intersection calls with bad parameters */ - H5E_BEGIN_TRY - { /* Bad dataspace ID */ - status = H5Sselect_intersect_block(H5I_INVALID_HID, block_start, block_end); - } - H5E_END_TRY - VERIFY(status, FAIL, "H5Sselect_intersect_block"); - H5E_BEGIN_TRY - { /* Bad start pointer */ - status = H5Sselect_intersect_block(sid, NULL, block_end); - } - H5E_END_TRY - VERIFY(status, FAIL, "H5Sselect_intersect_block"); - H5E_BEGIN_TRY - { /* Bad end pointer */ - status = H5Sselect_intersect_block(sid, block_start, NULL); - } - H5E_END_TRY - VERIFY(status, FAIL, "H5Sselect_intersect_block"); - H5E_BEGIN_TRY - { /* Invalid block */ - status = H5Sselect_intersect_block(sid, block_start, block_end2); - } - H5E_END_TRY - VERIFY(status, FAIL, "H5Sselect_intersect_block"); - H5E_BEGIN_TRY - { /* Another invalid block */ - status = H5Sselect_intersect_block(sid, block_start, block_end3); - } - H5E_END_TRY - VERIFY(status, FAIL, "H5Sselect_intersect_block"); - - /* Set selection to 'none' */ - ret = H5Sselect_none(sid); - CHECK(ret, FAIL, "H5Sselect_none"); - - /* Test block intersection with 'none' selection (always false) */ - status = H5Sselect_intersect_block(sid, block_start, block_end); - VERIFY(status, false, "H5Sselect_intersect_block"); - - /* Set selection to 'all' */ - ret = H5Sselect_all(sid); - CHECK(ret, FAIL, "H5Sselect_all"); - - /* Test block intersection with 'all' selection (always true) */ - status = H5Sselect_intersect_block(sid, block_start, block_end); - VERIFY(status, true, "H5Sselect_intersect_block"); - - /* Select sequence of ten points */ - coord[0][0] = 0; - coord[0][1] = 10; - coord[1][0] = 1; - coord[1][1] = 2; - coord[2][0] = 2; - coord[2][1] = 4; - coord[3][0] = 0; - coord[3][1] = 6; - coord[4][0] = 1; - coord[4][1] = 8; - coord[5][0] = 2; - coord[5][1] = 11; - coord[6][0] = 0; - coord[6][1] = 4; - coord[7][0] = 1; - coord[7][1] = 0; - coord[8][0] = 2; - coord[8][1] = 1; - coord[9][0] = 0; - coord[9][1] = 3; - ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)10, (const hsize_t *)coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - /* Test block intersection with 'point' selection */ - status = H5Sselect_intersect_block(sid, block_start, block_end); - VERIFY(status, true, "H5Sselect_intersect_block"); - status = H5Sselect_intersect_block(sid, block_start, block_end4); - VERIFY(status, false, "H5Sselect_intersect_block"); - - /* Select single 4x6 
hyperslab block at (2,1) */ - start[0] = 2; - start[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = 4; - count[1] = 6; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Test block intersection with single 'hyperslab' selection */ - status = H5Sselect_intersect_block(sid, block_start, block_end); - VERIFY(status, true, "H5Sselect_intersect_block"); - status = H5Sselect_intersect_block(sid, block_start, block_end4); - VERIFY(status, false, "H5Sselect_intersect_block"); - - /* 'OR' another hyperslab block in, making an irregular hyperslab selection */ - start[0] = 3; - start[1] = 2; - stride[0] = 1; - stride[1] = 1; - count[0] = 4; - count[1] = 6; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Test block intersection with 'hyperslab' selection */ - status = H5Sselect_intersect_block(sid, block_start, block_end); - VERIFY(status, true, "H5Sselect_intersect_block"); - status = H5Sselect_intersect_block(sid, block_start, block_end4); - VERIFY(status, false, "H5Sselect_intersect_block"); - - /* Select regular, strided hyperslab selection */ - start[0] = 2; - start[1] = 1; - stride[0] = 2; - stride[1] = 2; - count[0] = 2; - count[1] = 4; - block[0] = 1; - block[1] = 1; - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Test block intersection with single 'hyperslab' selection */ - status = H5Sselect_intersect_block(sid, block_start, block_end); - VERIFY(status, true, "H5Sselect_intersect_block"); - status = H5Sselect_intersect_block(sid, block_start, block_end4); - VERIFY(status, false, "H5Sselect_intersect_block"); - - /* Close dataspace */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); -} /* test_select_intersect_block() */ - -/**************************************************************** -** -** test_hyper_io_1d(): -** Test to verify all the selected 10th element in the 1-d file -** dataspace is read correctly into the 1-d contiguous memory space. -** This is modeled after the test scenario described in HDFFV-10585 -** that demonstrated the hyperslab slowness. A fix to speed up -** performance is in place to handle the special case for 1-d disjoint -** file dataspace into 1-d single block contiguous memory space. 
-** -****************************************************************/ -static void -test_hyper_io_1d(void) -{ - hid_t fid; /* File ID */ - hid_t did; /* Dataset ID */ - hid_t sid, mid; /* Dataspace IDs */ - hid_t dcpl; /* Dataset creation property list ID */ - hsize_t dims[1], maxdims[1], dimsm[1]; /* Dataset dimension sizes */ - hsize_t chunk_dims[1]; /* Chunk dimension size */ - hsize_t offset[1]; /* Starting offset for hyperslab */ - hsize_t stride[1]; /* Distance between blocks in the hyperslab selection */ - hsize_t count[1]; /* # of blocks in the the hyperslab selection */ - hsize_t block[1]; /* Size of block in the hyperslab selection */ - unsigned int wdata[CHUNKSZ]; /* Data to be written */ - unsigned int rdata[NUM_ELEMENTS / 10]; /* Data to be read */ - herr_t ret; /* Generic return value */ - unsigned i; /* Local index variable */ - - /* Output message about test being performed */ - MESSAGE(6, ("Testing Hyperslab I/O for 1-d single block memory space\n")); - - for (i = 0; i < CHUNKSZ; i++) - wdata[i] = i; - - /* Create the file file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); - - /* Create file dataspace */ - dims[0] = CHUNKSZ; - maxdims[0] = H5S_UNLIMITED; - sid = H5Screate_simple(RANK, dims, maxdims); - CHECK(sid, H5I_INVALID_HID, "H5Pcreate"); - - /* Create memory dataspace */ - dimsm[0] = CHUNKSZ; - mid = H5Screate_simple(RANK, dimsm, NULL); - CHECK(mid, H5I_INVALID_HID, "H5Pcreate"); - - /* Set up to create a chunked dataset */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate"); - - chunk_dims[0] = CHUNKSZ; - ret = H5Pset_chunk(dcpl, RANK, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create a chunked dataset */ - did = H5Dcreate2(fid, DNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); - - /* Set up hyperslab selection for file dataspace */ - offset[0] = 0; - stride[0] = 1; - count[0] = 1; - block[0] = CHUNKSZ; - - /* Write to each chunk in the dataset */ - for (i = 0; i < NUMCHUNKS; i++) { - /* Set the hyperslab selection */ - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write to the dataset */ - ret = H5Dwrite(did, H5T_NATIVE_INT, mid, sid, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Extend the dataset's dataspace */ - if (i < (NUMCHUNKS - 1)) { - offset[0] = offset[0] + CHUNKSZ; - dims[0] = dims[0] + CHUNKSZ; - ret = H5Dset_extent(did, dims); - CHECK(ret, FAIL, "H5Dset_extent"); - - /* Get the dataset's current dataspace */ - sid = H5Dget_space(did); - CHECK(sid, H5I_INVALID_HID, "H5Dget_space"); - } - } - - /* Closing */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(mid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file */ - fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid, H5I_INVALID_HID, "H5Fopen"); - - /* Open the dataset */ - did = H5Dopen2(fid, DNAME, H5P_DEFAULT); - CHECK(did, H5I_INVALID_HID, "H5Dopen"); - - /* Set up to read every 10th element in file dataspace */ - offset[0] = 1; - stride[0] = 10; - count[0] = NUM_ELEMENTS / 10; - block[0] = 1; - - /* Get the dataset's dataspace */ - sid = H5Dget_space(did); - CHECK(sid, H5I_INVALID_HID, "H5Dget_space"); - ret = 
H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, stride, count, block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Set up contiguous memory dataspace for the selected elements */ - dimsm[0] = count[0]; - mid = H5Screate_simple(RANK, dimsm, NULL); - CHECK(mid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Read all the selected 10th elements in the dataset into "rdata" */ - ret = H5Dread(did, H5T_NATIVE_INT, mid, sid, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Verify data read is correct */ - for (i = 0; i < 6; i += 2) { - VERIFY(rdata[i], 1, "H5Dread\n"); - VERIFY(rdata[i + 1], 11, "H5Dread\n"); - } - - /* Closing */ - ret = H5Sclose(mid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* test_hyper_io_1d() */ - -/**************************************************************** -** -** test_h5s_set_extent_none: -** Test to verify the behavior of dataspace code when passed -** a dataspace modified by H5Sset_extent_none(). -** -****************************************************************/ -static void -test_h5s_set_extent_none(void) -{ - hid_t sid = H5I_INVALID_HID; - hid_t dst_sid = H5I_INVALID_HID; - hid_t null_sid = H5I_INVALID_HID; - int rank = 1; - hsize_t current_dims = 123; - H5S_class_t cls; - int out_rank; - hsize_t out_dims; - hsize_t out_maxdims; - hssize_t out_points; - htri_t equal; - herr_t ret; - - /* Specific values here don't matter as we're just going to reset */ - sid = H5Screate_simple(rank, ¤t_dims, NULL); - CHECK(sid, H5I_INVALID_HID, "H5Screate_simple"); - - /* Dataspace class will be H5S_NULL after this. - * In versions prior to 1.10.7 / 1.12.1 this would produce a - * dataspace with the internal H5S_NO_CLASS class. - */ - ret = H5Sset_extent_none(sid); - CHECK(ret, FAIL, "H5Sset_extent_none"); - cls = H5Sget_simple_extent_type(sid); - VERIFY(cls, H5S_NULL, "H5Sget_simple_extent_type"); - - /* Extent getters should generate normal results and not segfault. - */ - out_rank = H5Sget_simple_extent_dims(sid, &out_dims, &out_maxdims); - VERIFY(out_rank, 0, "H5Sget_simple_extent_dims"); - out_rank = H5Sget_simple_extent_ndims(sid); - VERIFY(out_rank, 0, "H5Sget_simple_extent_ndims"); - out_points = H5Sget_simple_extent_npoints(sid); - VERIFY(out_points, 0, "H5Sget_simple_extent_npoints"); - - /* Check that copying the new (non-)extent works. - */ - dst_sid = H5Screate_simple(rank, ¤t_dims, NULL); - CHECK(dst_sid, H5I_INVALID_HID, "H5Screate_simple"); - ret = H5Sextent_copy(dst_sid, sid); - CHECK(ret, FAIL, "H5Sextent_copy"); - - /* Check that H5Sset_extent_none() produces the same extent as - * H5Screate(H5S_NULL). - */ - null_sid = H5Screate(H5S_NULL); - CHECK(null_sid, H5I_INVALID_HID, "H5Screate"); - equal = H5Sextent_equal(sid, null_sid); - VERIFY(equal, true, "H5Sextent_equal"); - - /* Close */ - ret = H5Sclose(sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(dst_sid); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Sclose(null_sid); - CHECK(ret, FAIL, "H5Sclose"); - -} /* test_h5s_set_extent_none() */ - -/**************************************************************** -** -** test_select(): Main H5S selection testing routine. 
-** -****************************************************************/ -void -test_select(void) -{ - hid_t plist_id; /* Property list for reading random hyperslabs */ - hid_t fapl; /* Property list accessing the file */ - int mdc_nelmts; /* Metadata number of elements */ - size_t rdcc_nelmts; /* Raw data number of elements */ - size_t rdcc_nbytes; /* Raw data number of bytes */ - double rdcc_w0; /* Raw data write percentage */ - hssize_t offset[SPACE7_RANK] = {1, 1}; /* Offset for testing selection offsets */ - const char *env_h5_drvr; /* File Driver value from environment */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Selections\n")); - - /* Get the VFD to use */ - env_h5_drvr = getenv(HDF5_DRIVER); - if (env_h5_drvr == NULL) - env_h5_drvr = "nomatch"; - - /* Create a dataset transfer property list */ - plist_id = H5Pcreate(H5P_DATASET_XFER); - CHECK(plist_id, FAIL, "H5Pcreate"); - - /* test I/O with a very small buffer for reads */ - ret = H5Pset_buffer(plist_id, (size_t)59, NULL, NULL); - CHECK(ret, FAIL, "H5Pset_buffer"); - - /* These next tests use the same file */ - test_select_hyper(H5P_DEFAULT); /* Test basic H5S hyperslab selection code */ - test_select_hyper(plist_id); /* Test basic H5S hyperslab selection code */ - test_select_point(H5P_DEFAULT); /* Test basic H5S element selection code, also tests appending to existing - element selections */ - test_select_point(plist_id); /* Test basic H5S element selection code, also tests appending to existing - element selections */ - test_select_all(H5P_DEFAULT); /* Test basic all & none selection code */ - test_select_all(plist_id); /* Test basic all & none selection code */ - test_select_all_hyper(H5P_DEFAULT); /* Test basic all & none selection code */ - test_select_all_hyper(plist_id); /* Test basic all & none selection code */ - - /* These next tests use the same file */ - test_select_combo(); /* Test combined hyperslab & element selection code */ - test_select_hyper_stride(H5P_DEFAULT); /* Test strided hyperslab selection code */ - test_select_hyper_stride(plist_id); /* Test strided hyperslab selection code */ - test_select_hyper_contig(H5T_STD_U16LE, H5P_DEFAULT); /* Test contiguous hyperslab selection code */ - test_select_hyper_contig(H5T_STD_U16LE, plist_id); /* Test contiguous hyperslab selection code */ - test_select_hyper_contig(H5T_STD_U16BE, H5P_DEFAULT); /* Test contiguous hyperslab selection code */ - test_select_hyper_contig(H5T_STD_U16BE, plist_id); /* Test contiguous hyperslab selection code */ - test_select_hyper_contig2(H5T_STD_U16LE, - H5P_DEFAULT); /* Test more contiguous hyperslab selection cases */ - test_select_hyper_contig2(H5T_STD_U16LE, plist_id); /* Test more contiguous hyperslab selection cases */ - test_select_hyper_contig2(H5T_STD_U16BE, - H5P_DEFAULT); /* Test more contiguous hyperslab selection cases */ - test_select_hyper_contig2(H5T_STD_U16BE, plist_id); /* Test more contiguous hyperslab selection cases */ - test_select_hyper_contig3(H5T_STD_U16LE, - H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */ - test_select_hyper_contig3(H5T_STD_U16LE, - plist_id); /* Test yet more contiguous hyperslab selection cases */ - test_select_hyper_contig3(H5T_STD_U16BE, - H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */ - test_select_hyper_contig3(H5T_STD_U16BE, - plist_id); /* Test yet more contiguous hyperslab selection cases */ -#if 0 - test_select_hyper_contig_dr(H5T_STD_U16LE, H5P_DEFAULT); - 
test_select_hyper_contig_dr(H5T_STD_U16LE, plist_id); - test_select_hyper_contig_dr(H5T_STD_U16BE, H5P_DEFAULT); - test_select_hyper_contig_dr(H5T_STD_U16BE, plist_id); -#else - printf("** SKIPPED a test due to file creation issues\n"); -#endif -#if 0 - test_select_hyper_checker_board_dr(H5T_STD_U16LE, H5P_DEFAULT); - test_select_hyper_checker_board_dr(H5T_STD_U16LE, plist_id); - test_select_hyper_checker_board_dr(H5T_STD_U16BE, H5P_DEFAULT); - test_select_hyper_checker_board_dr(H5T_STD_U16BE, plist_id); -#else - printf("** SKIPPED a test due to assertion in HDF5\n"); -#endif - test_select_hyper_copy(); /* Test hyperslab selection copying code */ - test_select_point_copy(); /* Test point selection copying code */ - test_select_hyper_offset(); /* Test selection offset code with hyperslabs */ - test_select_hyper_offset2(); /* Test more selection offset code with hyperslabs */ - test_select_point_offset(); /* Test selection offset code with elements */ - test_select_hyper_union(); /* Test hyperslab union code */ - - /* Fancy hyperslab API tests */ - test_select_hyper_union_stagger(); /* Test hyperslab union code for staggered slabs */ - test_select_hyper_union_3d(); /* Test hyperslab union code for 3-D dataset */ - test_select_hyper_valid_combination(); /* Test different input combinations */ - - /* The following tests are currently broken with the Direct VFD */ - if (strcmp(env_h5_drvr, "direct") != 0) { - test_select_hyper_and_2d(); /* Test hyperslab intersection (AND) code for 2-D dataset */ - test_select_hyper_xor_2d(); /* Test hyperslab XOR code for 2-D dataset */ - test_select_hyper_notb_2d(); /* Test hyperslab NOTB code for 2-D dataset */ - test_select_hyper_nota_2d(); /* Test hyperslab NOTA code for 2-D dataset */ - } - - /* test the random hyperslab I/O with the default property list for reading */ - test_select_hyper_union_random_5d(H5P_DEFAULT); /* Test hyperslab union code for random 5-D hyperslabs */ - - /* test random hyperslab I/O with a small buffer for reads */ - test_select_hyper_union_random_5d(plist_id); /* Test hyperslab union code for random 5-D hyperslabs */ - - /* Create a dataset transfer property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl, FAIL, "H5Pcreate"); - - /* Get the default file access properties for caching */ - ret = H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); - CHECK(ret, FAIL, "H5Pget_cache"); - - /* Increase the size of the raw data cache */ - rdcc_nbytes = 10 * 1024 * 1024; - - /* Set the file access properties for caching */ - ret = H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); - CHECK(ret, FAIL, "H5Pset_cache"); - - /* Test reading in a large hyperslab with a chunked dataset */ - test_select_hyper_chunk(fapl, H5P_DEFAULT); - - /* Test reading in a large hyperslab with a chunked dataset a small amount at a time */ - test_select_hyper_chunk(fapl, plist_id); - - /* Close file access property list */ - ret = H5Pclose(fapl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(plist_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* More tests for checking validity of selections */ - test_select_valid(); - - /* Tests for combining "all" and "none" selections with hyperslabs */ - test_select_combine(); - - /* Test filling selections */ - /* (Also tests iterating through each selection */ - test_select_fill_all(); - test_select_fill_point(NULL); - test_select_fill_point(offset); - test_select_fill_hyper_simple(NULL); - test_select_fill_hyper_simple(offset); - 
test_select_fill_hyper_regular(NULL); - test_select_fill_hyper_regular(offset); - test_select_fill_hyper_irregular(NULL); - test_select_fill_hyper_irregular(offset); - - /* Test 0-sized selections */ - test_select_none(); - - /* Test selections on scalar dataspaces */ - test_scalar_select(); - test_scalar_select2(); - test_scalar_select3(); - - /* Test "same shape" routine */ - test_shape_same(); - - /* Test "same shape" routine for selections of different rank */ - test_shape_same_dr(); - - /* Test "re-build" routine */ - test_space_rebuild(); - - /* Test "update diminfo" routine */ - test_space_update_diminfo(); - - /* Test point selections in chunked datasets */ - test_select_point_chunk(); - - /* Test scalar dataspaces in chunked datasets */ - test_select_scalar_chunk(); -#if 0 - /* Test using selection offset on hyperslab in chunked dataset */ - test_select_hyper_chunk_offset(); - test_select_hyper_chunk_offset2(); -#else - printf("** SKIPPED a test due to assertion in HDF5\n"); -#endif - - /* Test selection bounds with & without offsets */ - test_select_bounds(); - - /* Test 'regular' hyperslab query routines */ - test_hyper_regular(); - - /* Test unlimited hyperslab selections */ - test_hyper_unlim(); - - /* Test the consistency of internal data structures of selection */ - test_internal_consistency(); - - /* Test irregular selection I/O */ - test_irreg_io(); - - /* Test selection iterators */ - test_sel_iter(); - - /* Test selection intersection with block */ - test_select_intersect_block(); - - /* Test reading of 1-d disjoint file space to 1-d single block memory space */ - test_hyper_io_1d(); - - /* Test H5Sset_extent_none() functionality after we updated it to set - * the class to H5S_NULL instead of H5S_NO_CLASS. - */ - test_h5s_set_extent_none(); - -} /* test_select() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_select - * - * Purpose: Cleanup temporary test files - * - * Return: none - * - *------------------------------------------------------------------------- - */ -void -cleanup_select(void) -{ - H5Fdelete(FILENAME, H5P_DEFAULT); -} diff --git a/test/API/ttime.c b/test/API/ttime.c deleted file mode 100644 index a4a5ccb5343..00000000000 --- a/test/API/ttime.c +++ /dev/null @@ -1,225 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: ttime - * - * Test the Time Datatype functionality - * - *************************************************************/ - -#include "testhdf5.h" - -#define DATAFILE "ttime.h5" -#ifdef NOT_YET -#define DATASETNAME "Dataset" -#endif /* NOT_YET */ - -/**************************************************************** -** -** test_time_commit(): Test committing time datatypes to a file -** -****************************************************************/ -static void -test_time_commit(void) -{ - hid_t file_id, tid; /* identifiers */ - herr_t status; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Committing Time Datatypes\n")); - - /* Create a new file using default properties. */ - file_id = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - tid = H5Tcopy(H5T_UNIX_D32LE); - CHECK(tid, FAIL, "H5Tcopy"); - status = H5Tcommit2(file_id, "Committed D32LE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(status, FAIL, "H5Tcommit2"); - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - tid = H5Tcopy(H5T_UNIX_D32BE); - CHECK(tid, FAIL, "H5Tcopy"); - status = H5Tcommit2(file_id, "Committed D32BE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(status, FAIL, "H5Tcommit2"); - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - tid = H5Tcopy(H5T_UNIX_D64LE); - CHECK(tid, FAIL, "H5Tcopy"); - status = H5Tcommit2(file_id, "Committed D64LE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(status, FAIL, "H5Tcommit2"); - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - tid = H5Tcopy(H5T_UNIX_D64BE); - CHECK(tid, FAIL, "H5Tcopy"); - status = H5Tcommit2(file_id, "Committed D64BE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(status, FAIL, "H5Tcommit2"); - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - /* Close the file. 
*/ - status = H5Fclose(file_id); - CHECK(status, FAIL, "H5Fclose"); - - file_id = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - - tid = H5Topen2(file_id, "Committed D32LE type", H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - - if (!H5Tequal(tid, H5T_UNIX_D32LE)) - TestErrPrintf("H5T_UNIX_D32LE datatype not found\n"); - - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - tid = H5Topen2(file_id, "Committed D32BE type", H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - - if (!H5Tequal(tid, H5T_UNIX_D32BE)) - TestErrPrintf("H5T_UNIX_D32BE datatype not found\n"); - - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - tid = H5Topen2(file_id, "Committed D64LE type", H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - - if (!H5Tequal(tid, H5T_UNIX_D64LE)) - TestErrPrintf("H5T_UNIX_D64LE datatype not found"); - - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - tid = H5Topen2(file_id, "Committed D64BE type", H5P_DEFAULT); - CHECK(tid, FAIL, "H5Topen2"); - - if (!H5Tequal(tid, H5T_UNIX_D64BE)) - TestErrPrintf("H5T_UNIX_D64BE datatype not found"); - - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Fclose(file_id); - CHECK(status, FAIL, "H5Fclose"); -} - -#ifdef NOT_YET -/**************************************************************** -** -** test_time_io(): Test writing time data to a dataset -** -****************************************************************/ -static void -test_time_io(void) -{ - hid_t fid; /* File identifier */ - hid_t dsid; /* Dataset identifier */ - hid_t tid; /* Datatype identifier */ - hid_t sid; /* Dataspace identifier */ - time_t timenow, timethen; /* Times */ - herr_t status; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Committing Time Datatypes\n")); - - /* Create a new file using default properties. 
*/ - fid = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - /* Create a scalar dataspace */ - sid = H5Screate(H5S_SCALAR); - CHECK(sid, FAIL, "H5Screate"); - - /* Create a dataset with a time datatype */ - dsid = H5Dcreate2(fid, DATASETNAME, H5T_UNIX_D32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dcreate2"); - - /* Initialize time data value */ - timenow = HDtime(NULL); - - /* Write time to dataset */ - status = H5Dwrite(dsid, H5T_UNIX_D32LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timenow); - CHECK(status, FAIL, "H5Dwrite"); - - /* Close objects */ - status = H5Dclose(dsid); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Sclose(sid); - CHECK(status, FAIL, "H5Sclose"); - - status = H5Fclose(fid); - CHECK(status, FAIL, "H5Fclose"); - - /* Open file and dataset, read time back and print it in calendar format */ - fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - dsid = H5Dopen2(fid, DATASETNAME, H5P_DEFAULT); - CHECK(dsid, FAIL, "H5Dopen2"); - - tid = H5Dget_type(dsid); - CHECK(tid, FAIL, "H5Dget_type"); - if (H5Tget_class(tid) == H5T_TIME) - fprintf(stderr, "datatype class is H5T_TIME\n"); - status = H5Tclose(tid); - CHECK(status, FAIL, "H5Tclose"); - - status = H5Dread(dsid, H5T_UNIX_D32LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timethen); - CHECK(status, FAIL, "H5Dread"); - fprintf(stderr, "time written was: %s\n", HDctime(&timethen)); - - status = H5Dclose(dsid); - CHECK(status, FAIL, "H5Dclose"); - - status = H5Fclose(fid); - CHECK(status, FAIL, "H5Fclose"); -} -#endif /* NOT_YET */ - -/**************************************************************** -** -** test_time(): Main time datatype testing routine. -** -****************************************************************/ -void -test_time(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing Time Datatypes\n")); - - test_time_commit(); /* Test committing time datatypes to a file */ -#ifdef NOT_YET - test_time_io(); /* Test writing time data to a dataset */ -#endif /* NOT_YET */ - -} /* test_time() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_time - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_time(void) -{ - H5Fdelete(DATAFILE, H5P_DEFAULT); -} diff --git a/test/API/tunicode.c b/test/API/tunicode.c deleted file mode 100644 index 8b404f2fef9..00000000000 --- a/test/API/tunicode.c +++ /dev/null @@ -1,867 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* Unicode test */ -#include "testhdf5.h" - -#define NUM_CHARS 16 -#define MAX_STRING_LENGTH ((NUM_CHARS * 4) + 1) /* Max length in bytes */ -#define MAX_PATH_LENGTH (MAX_STRING_LENGTH + 20) /* Max length in bytes */ -#define MAX_CODE_POINT 0x200000 -#define FILENAME "unicode.h5" -/* A buffer to hold two copies of the UTF-8 string */ -#define LONG_BUF_SIZE (2 * MAX_STRING_LENGTH + 4) - -#define DSET1_NAME "fl_string_dataset" -#define DSET3_NAME "dataset3" -#define DSET4_NAME "dataset4" -#define VL_DSET1_NAME "vl_dset_1" -#define GROUP1_NAME "group1" -#define GROUP2_NAME "group2" -#define GROUP3_NAME "group3" -#define GROUP4_NAME "group4" - -#define RANK 1 -#define COMP_INT_VAL 7 -#define COMP_FLOAT_VAL (-42.0F) -#define COMP_DOUBLE_VAL 42.0 - -/* Test function prototypes */ -void test_fl_string(hid_t fid, const char *string); -void test_strpad(hid_t fid, const char *string); -void test_vl_string(hid_t fid, const char *string); -void test_objnames(hid_t fid, const char *string); -void test_attrname(hid_t fid, const char *string); -void test_compound(hid_t fid, const char *string); -void test_enum(hid_t fid, const char *string); -void test_opaque(hid_t fid, const char *string); - -/* Utility function prototypes */ -static hid_t mkstr(size_t len, H5T_str_t strpad); -unsigned int write_char(unsigned int c, char *test_string, unsigned int cur_pos); -void dump_string(const char *string); - -/* - * test_fl_string - * Tests that UTF-8 can be used for fixed-length string data. - * Writes the string to a dataset and reads it back again. - */ -void -test_fl_string(hid_t fid, const char *string) -{ - hid_t dtype_id, space_id, dset_id; - hsize_t dims = 1; - char read_buf[MAX_STRING_LENGTH]; - H5T_cset_t cset; - herr_t ret; - - /* Create the datatype, ensure that the character set behaves - * correctly (it should default to ASCII and can be set to UTF8) - */ - dtype_id = H5Tcopy(H5T_C_S1); - CHECK(dtype_id, FAIL, "H5Tcopy"); - ret = H5Tset_size(dtype_id, (size_t)MAX_STRING_LENGTH); - CHECK(ret, FAIL, "H5Tset_size"); - cset = H5Tget_cset(dtype_id); - VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset"); - ret = H5Tset_cset(dtype_id, H5T_CSET_UTF8); - CHECK(ret, FAIL, "H5Tset_cset"); - cset = H5Tget_cset(dtype_id); - VERIFY(cset, H5T_CSET_UTF8, "H5Tget_cset"); - - /* Create dataspace for a dataset */ - space_id = H5Screate_simple(RANK, &dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - /* Create a dataset */ - dset_id = H5Dcreate2(fid, DSET1_NAME, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - /* Write UTF-8 string to dataset */ - ret = H5Dwrite(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, string); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read string back and make sure it is unchanged */ - ret = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); - CHECK(ret, FAIL, "H5Dread"); - - VERIFY(strcmp(string, read_buf), 0, "strcmp"); - - /* Close all */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Tclose(dtype_id); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); -} - -/* - * test_strpad - * Tests string padding for a UTF-8 string. - * Converts strings to shorter and then longer strings. - * Borrows heavily from dtypes.c, but is more complicated because - * the string is randomly generated. 
- */ -void -test_strpad(hid_t H5_ATTR_UNUSED fid, const char *string) -{ - /* buf is used to hold the data that H5Tconvert operates on. */ - char buf[LONG_BUF_SIZE]; - - /* cmpbuf holds the output that H5Tconvert should produce, - * to compare against the actual output. */ - char cmpbuf[LONG_BUF_SIZE]; - - /* new_string is a slightly modified version of the UTF-8 - * string to make the tests run more smoothly. */ - char new_string[MAX_STRING_LENGTH + 2]; - - size_t length; /* Length of new_string in bytes */ - size_t small_len; /* Size of the small datatype */ - size_t big_len; /* Size of the larger datatype */ - hid_t src_type, dst_type; - herr_t ret; - - /* The following tests are simpler if the UTF-8 string contains - * the right number of bytes (even or odd, depending on the test). - * We create a 'new_string' whose length is convenient by prepending - * an 'x' to 'string' when necessary. */ - length = strlen(string); - if (length % 2 != 1) { - strcpy(new_string, "x"); - strcat(new_string, string); - length++; - } - else { - strcpy(new_string, string); - } - - /* Convert a null-terminated string to a shorter and longer null - * terminated string. */ - - /* Create a src_type that holds the UTF-8 string and its final NULL */ - big_len = length + 1; /* +1 byte for final NULL */ - assert((2 * big_len) <= sizeof(cmpbuf)); - src_type = mkstr(big_len, H5T_STR_NULLTERM); - CHECK(src_type, FAIL, "mkstr"); - /* Create a dst_type that holds half of the UTF-8 string and a final - * NULL */ - small_len = (length + 1) / 2; - dst_type = mkstr(small_len, H5T_STR_NULLTERM); - CHECK(dst_type, FAIL, "mkstr"); - - /* Fill the buffer with two copies of the UTF-8 string, each with a - * terminating NULL. It will look like "abcdefg\0abcdefg\0". */ - strncpy(buf, new_string, big_len); - strncpy(&buf[big_len], new_string, big_len); - - ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tconvert"); - - /* After conversion, the buffer should look like - * "abc\0abc\0abcdefg\0". Note that this is just what the bytes look - * like; UTF-8 characters may well have been truncated. - * To check that the conversion worked properly, we'll build this - * string manually. */ - strncpy(cmpbuf, new_string, small_len - 1); - cmpbuf[small_len - 1] = '\0'; - strncpy(&cmpbuf[small_len], new_string, small_len - 1); - cmpbuf[2 * small_len - 1] = '\0'; - strcpy(&cmpbuf[2 * small_len], new_string); - - VERIFY(memcmp(buf, cmpbuf, 2 * big_len), 0, "memcmp"); - - /* Now convert from smaller datatype to bigger datatype. This should - * leave our buffer looking like: "abc\0\0\0\0\0abc\0\0\0\0\0" */ - ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tconvert"); - - /* First fill the buffer with NULLs */ - memset(cmpbuf, '\0', (size_t)LONG_BUF_SIZE); - /* Copy in the characters */ - strncpy(cmpbuf, new_string, small_len - 1); - strncpy(&cmpbuf[big_len], new_string, small_len - 1); - - VERIFY(memcmp(buf, cmpbuf, 2 * big_len), 0, "memcmp"); - - ret = H5Tclose(src_type); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(dst_type); - CHECK(ret, FAIL, "H5Tclose"); - - /* Now test null padding. Null-padded strings do *not* need - * terminating NULLs, so the sizes of the datatypes are slightly - * different and we want a string with an even number of characters. 
*/ - length = strlen(string); - if (length % 2 != 0) { - strcpy(new_string, "x"); - strcat(new_string, string); - length++; - } - else { - strcpy(new_string, string); - } - - /* Create a src_type that holds the UTF-8 string */ - big_len = length; - assert((2 * big_len) <= sizeof(cmpbuf)); - src_type = mkstr(big_len, H5T_STR_NULLPAD); - CHECK(src_type, FAIL, "mkstr"); - /* Create a dst_type that holds half of the UTF-8 string */ - small_len = length / 2; - dst_type = mkstr(small_len, H5T_STR_NULLPAD); - CHECK(dst_type, FAIL, "mkstr"); - - /* Fill the buffer with two copies of the UTF-8 string. - * It will look like "abcdefghabcdefgh". */ - strncpy(buf, new_string, big_len); - strncpy(&buf[big_len], new_string, big_len); - - ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tconvert"); - - /* After conversion, the buffer should look like - * "abcdabcdabcdefgh". Note that this is just what the bytes look - * like; UTF-8 characters may well have been truncated. - * To check that the conversion worked properly, we'll build this - * string manually. */ - strncpy(cmpbuf, new_string, small_len); - strncpy(&cmpbuf[small_len], new_string, small_len); - strncpy(&cmpbuf[2 * small_len], new_string, big_len); - - VERIFY(memcmp(buf, cmpbuf, 2 * big_len), 0, "memcmp"); - - /* Now convert from smaller datatype to bigger datatype. This should - * leave our buffer looking like: "abcd\0\0\0\0abcd\0\0\0\0" */ - ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tconvert"); - - /* First fill the buffer with NULLs */ - memset(cmpbuf, '\0', (size_t)LONG_BUF_SIZE); - /* Copy in the characters */ - strncpy(cmpbuf, new_string, small_len); - strncpy(&cmpbuf[big_len], new_string, small_len); - - VERIFY(memcmp(buf, cmpbuf, 2 * big_len), 0, "memcmp"); - - ret = H5Tclose(src_type); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(dst_type); - CHECK(ret, FAIL, "H5Tclose"); - - /* Test space padding. This is very similar to null-padding; we can - use the same values of length, small_len, and big_len. */ - - src_type = mkstr(big_len, H5T_STR_SPACEPAD); - CHECK(src_type, FAIL, "mkstr"); - dst_type = mkstr(small_len, H5T_STR_SPACEPAD); - CHECK(src_type, FAIL, "mkstr"); - - /* Fill the buffer with two copies of the UTF-8 string. - * It will look like "abcdefghabcdefgh". */ - strcpy(buf, new_string); - strcpy(&buf[big_len], new_string); - - ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tconvert"); - - /* After conversion, the buffer should look like - * "abcdabcdabcdefgh". Note that this is just what the bytes look - * like; UTF-8 characters may have been truncated. - * To check that the conversion worked properly, we'll build this - * string manually. */ - strncpy(cmpbuf, new_string, small_len); - strncpy(&cmpbuf[small_len], new_string, small_len); - strncpy(&cmpbuf[2 * small_len], new_string, big_len); - - VERIFY(memcmp(buf, cmpbuf, 2 * big_len), 0, "memcmp"); - - /* Now convert from smaller datatype to bigger datatype. 
This should - * leave our buffer looking like: "abcd abcd " */ - ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tconvert"); - - /* First fill the buffer with spaces */ - memset(cmpbuf, ' ', (size_t)LONG_BUF_SIZE); - /* Copy in the characters */ - strncpy(cmpbuf, new_string, small_len); - strncpy(&cmpbuf[big_len], new_string, small_len); - - VERIFY(memcmp(buf, cmpbuf, 2 * big_len), 0, "memcmp"); - - ret = H5Tclose(src_type); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(dst_type); - CHECK(ret, FAIL, "H5Tclose"); -} - -/* - * test_vl_string - * Tests variable-length string datatype with UTF-8 strings. - */ -void -test_vl_string(hid_t fid, const char *string) -{ - hid_t type_id, space_id, dset_id; - hsize_t dims = 1; - hsize_t size; /* Number of bytes used */ - char *read_buf[1]; - herr_t ret; - - /* Create dataspace for datasets */ - space_id = H5Screate_simple(RANK, &dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - type_id = H5Tcopy(H5T_C_S1); - CHECK(type_id, FAIL, "H5Tcopy"); - ret = H5Tset_size(type_id, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create a dataset */ - dset_id = H5Dcreate2(fid, VL_DSET1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &string); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dset_id, type_id, space_id, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - VERIFY(size, (hsize_t)strlen(string) + 1, "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - VERIFY(strcmp(string, read_buf[0]), 0, "strcmp"); - - /* Reclaim the read VL data */ - ret = H5Treclaim(type_id, space_id, H5P_DEFAULT, read_buf); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close all */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); -} - -/* - * test_objnames - * Tests that UTF-8 can be used for object names in the file. - * Tests groups, datasets, named datatypes, and soft links. - * Note that this test doesn't actually mark the names as being - * in UTF-8. At the time this test was written, that feature - * didn't exist in HDF5, and when the character encoding property - * was added to links it didn't change how they were stored in the file, - * -JML 2/2/2006 - */ -void -test_objnames(hid_t fid, const char *string) -{ - hid_t grp_id, grp1_id, grp2_id, grp3_id; - hid_t type_id, dset_id, space_id; -#if 0 - char read_buf[MAX_STRING_LENGTH]; -#endif - char path_buf[MAX_PATH_LENGTH]; - hsize_t dims = 1; -#if 0 - hobj_ref_t obj_ref; - ssize_t size; -#endif - herr_t ret; - - /* Create a group with a UTF-8 name */ - grp_id = H5Gcreate2(fid, string, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp_id, FAIL, "H5Gcreate2"); -#if 0 - /* Set a comment on the group to test that we can access the group - * Also test that UTF-8 comments can be read. 
- */ - ret = H5Oset_comment_by_name(fid, string, string, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oset_comment_by_name"); - size = H5Oget_comment_by_name(fid, string, read_buf, (size_t)MAX_STRING_LENGTH, H5P_DEFAULT); - CHECK(size, FAIL, "H5Oget_comment_by_name"); -#endif - ret = H5Gclose(grp_id); - CHECK(ret, FAIL, "H5Gclose"); -#if 0 - VERIFY(strcmp(string, read_buf), 0, "strcmp"); -#endif - /* Create a new dataset with a UTF-8 name */ - grp1_id = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp1_id, FAIL, "H5Gcreate2"); - - space_id = H5Screate_simple(RANK, &dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - dset_id = H5Dcreate2(grp1_id, string, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - /* Make sure that dataset can be opened again */ - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - - dset_id = H5Dopen2(grp1_id, string, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Dopen2"); - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Gclose(grp1_id); - CHECK(ret, FAIL, "H5Gclose"); - - /* Do the same for a named datatype */ - grp2_id = H5Gcreate2(fid, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp2_id, FAIL, "H5Gcreate2"); - - type_id = H5Tcreate(H5T_OPAQUE, (size_t)1); - CHECK(type_id, FAIL, "H5Tcreate"); - ret = H5Tcommit2(grp2_id, string, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(type_id, FAIL, "H5Tcommit2"); - ret = H5Tclose(type_id); - CHECK(type_id, FAIL, "H5Tclose"); - - type_id = H5Topen2(grp2_id, string, H5P_DEFAULT); - CHECK(type_id, FAIL, "H5Topen2"); - ret = H5Tclose(type_id); - CHECK(type_id, FAIL, "H5Tclose"); - - /* Don't close the group -- use it to test that object references - * can refer to objects named in UTF-8 */ -#if 0 - space_id = H5Screate_simple(RANK, &dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - dset_id = - H5Dcreate2(grp2_id, DSET3_NAME, H5T_STD_REF_OBJ, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Dcreate2"); - - /* Create reference to named datatype */ - ret = H5Rcreate(&obj_ref, grp2_id, string, H5R_OBJECT, (hid_t)-1); - CHECK(ret, FAIL, "H5Rcreate"); - /* Write selection and read it back*/ - ret = H5Dwrite(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); - CHECK(ret, FAIL, "H5Dwrite"); - ret = H5Dread(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); - CHECK(ret, FAIL, "H5Dread"); - - /* Ensure that we can open named datatype using object reference */ - type_id = H5Rdereference2(dset_id, H5P_DEFAULT, H5R_OBJECT, &obj_ref); - CHECK(type_id, FAIL, "H5Rdereference2"); - ret = H5Tcommitted(type_id); - VERIFY(ret, 1, "H5Tcommitted"); - - ret = H5Tclose(type_id); - CHECK(type_id, FAIL, "H5Tclose"); - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); -#endif - ret = H5Gclose(grp2_id); - CHECK(ret, FAIL, "H5Gclose"); - - /* Create "group3". Build a hard link from group3 to group2, which has - * a datatype with the UTF-8 name. Create a soft link in group3 - * pointing through the hard link to the datatype. Give the soft - * link a name in UTF-8. Ensure that the soft link works. 
*/ - - grp3_id = H5Gcreate2(fid, GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(grp3_id, FAIL, "H5Gcreate2"); - - ret = H5Lcreate_hard(fid, GROUP2_NAME, grp3_id, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - strcpy(path_buf, GROUP2_NAME); - strcat(path_buf, "/"); - strcat(path_buf, string); - ret = H5Lcreate_hard(grp3_id, path_buf, H5L_SAME_LOC, string, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Lcreate_hard"); - - /* Open named datatype using soft link */ - type_id = H5Topen2(grp3_id, string, H5P_DEFAULT); - CHECK(type_id, FAIL, "H5Topen2"); - - ret = H5Tclose(type_id); - CHECK(type_id, FAIL, "H5Tclose"); - ret = H5Gclose(grp3_id); - CHECK(ret, FAIL, "H5Gclose"); -} - -/* - * test_attrname - * Test that attributes can deal with UTF-8 strings - */ -void -test_attrname(hid_t fid, const char *string) -{ - hid_t group_id, attr_id; - hid_t dtype_id, space_id; - hsize_t dims = 1; - char read_buf[MAX_STRING_LENGTH]; - ssize_t size; - herr_t ret; - - /* Create a new group and give it an attribute whose - * name and value are UTF-8 strings. - */ - group_id = H5Gcreate2(fid, GROUP4_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(group_id, FAIL, "H5Gcreate2"); - - space_id = H5Screate_simple(RANK, &dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - dtype_id = H5Tcopy(H5T_C_S1); - CHECK(dtype_id, FAIL, "H5Tcopy"); - ret = H5Tset_size(dtype_id, (size_t)MAX_STRING_LENGTH); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create the attribute and check that its name is correct */ - attr_id = H5Acreate2(group_id, string, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr_id, FAIL, "H5Acreate2"); - size = H5Aget_name(attr_id, (size_t)MAX_STRING_LENGTH, read_buf); - CHECK(size, FAIL, "H5Aget_name"); - ret = strcmp(read_buf, string); - VERIFY(ret, 0, "strcmp"); - read_buf[0] = '\0'; - - /* Try writing and reading from the attribute */ - ret = H5Awrite(attr_id, dtype_id, string); - CHECK(ret, FAIL, "H5Awrite"); - ret = H5Aread(attr_id, dtype_id, read_buf); - CHECK(ret, FAIL, "H5Aread"); - ret = strcmp(read_buf, string); - VERIFY(ret, 0, "strcmp"); - - /* Clean up */ - ret = H5Aclose(attr_id); - CHECK(ret, FAIL, "H5Aclose"); - ret = H5Tclose(dtype_id); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Gclose(group_id); - CHECK(ret, FAIL, "H5Gclose"); -} - -/* - * test_compound - * Test that compound datatypes can have UTF-8 field names. - */ -void -test_compound(hid_t fid, const char *string) -{ - /* Define two compound structures, s1_t and s2_t. - * s2_t is a subset of s1_t, with two out of three - * fields. - * This is stolen from the h5_compound example. 
- */ - typedef struct s1_t { - int a; - double c; - float b; - } s1_t; - typedef struct s2_t { - double c; - int a; - } s2_t; - /* Actual variable declarations */ - s1_t s1; - s2_t s2; - hid_t s1_tid, s2_tid; - hid_t space_id, dset_id; - hsize_t dim = 1; - char *readbuf; - herr_t ret; - - /* Initialize compound data */ - memset(&s1, 0, sizeof(s1_t)); /* To make purify happy */ - s1.a = COMP_INT_VAL; - s1.c = COMP_DOUBLE_VAL; - s1.b = COMP_FLOAT_VAL; - - /* Create compound datatypes using UTF-8 field name */ - s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t)); - CHECK(s1_tid, FAIL, "H5Tcreate"); - ret = H5Tinsert(s1_tid, string, HOFFSET(s1_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Check that the field name was stored correctly */ - readbuf = H5Tget_member_name(s1_tid, 0); - ret = strcmp(readbuf, string); - VERIFY(ret, 0, "strcmp"); - H5free_memory(readbuf); - - /* Add the other fields to the datatype */ - ret = H5Tinsert(s1_tid, "c_name", HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(s1_tid, "b_name", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create second datatype, with only two fields. */ - s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t)); - CHECK(s2_tid, FAIL, "H5Tcreate"); - ret = H5Tinsert(s2_tid, "c_name", HOFFSET(s2_t, c), H5T_NATIVE_DOUBLE); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(s2_tid, string, HOFFSET(s2_t, a), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create the dataspace and dataset. */ - space_id = H5Screate_simple(1, &dim, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - dset_id = H5Dcreate2(fid, DSET4_NAME, s1_tid, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - /* Write data to the dataset. */ - ret = H5Dwrite(dset_id, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &s1); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Ensure that data can be read back by field name into s2 struct */ - ret = H5Dread(dset_id, s2_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &s2); - CHECK(ret, FAIL, "H5Dread"); - - VERIFY(s2.a, COMP_INT_VAL, "H5Dread"); - VERIFY(s2.c, COMP_DOUBLE_VAL, "H5Dread"); - - /* Clean up */ - ret = H5Tclose(s1_tid); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Tclose(s2_tid); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); -} - -/* - * test_enum - * Test that enumerated datatypes can have UTF-8 member names. - */ -void -test_enum(hid_t H5_ATTR_UNUSED fid, const char *string) -{ - /* Define an enumerated type */ - typedef enum { E1_RED, E1_GREEN, E1_BLUE, E1_WHITE } c_e1; - /* Variable declarations */ - c_e1 val; - herr_t ret; - hid_t type_id; - char readbuf[MAX_STRING_LENGTH]; - - /* Create an enumerated datatype in HDF5 with a UTF-8 member name*/ - type_id = H5Tcreate(H5T_ENUM, sizeof(c_e1)); - CHECK(type_id, FAIL, "H5Tcreate"); - val = E1_RED; - ret = H5Tenum_insert(type_id, "RED", &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - val = E1_GREEN; - ret = H5Tenum_insert(type_id, "GREEN", &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - val = E1_BLUE; - ret = H5Tenum_insert(type_id, "BLUE", &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - val = E1_WHITE; - ret = H5Tenum_insert(type_id, string, &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - - /* Ensure that UTF-8 member name gives the right value and vice versa. 
*/ - ret = H5Tenum_valueof(type_id, string, &val); - CHECK(ret, FAIL, "H5Tenum_valueof"); - VERIFY(val, E1_WHITE, "H5Tenum_valueof"); - ret = H5Tenum_nameof(type_id, &val, readbuf, (size_t)MAX_STRING_LENGTH); - CHECK(ret, FAIL, "H5Tenum_nameof"); - ret = strcmp(readbuf, string); - VERIFY(ret, 0, "strcmp"); - - /* Close the datatype */ - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); -} - -/* - * test_opaque - * Test comments on opaque datatypes - */ -void -test_opaque(hid_t H5_ATTR_UNUSED fid, const char *string) -{ - hid_t type_id; - char *read_buf; - herr_t ret; - - /* Create an opaque type and give it a UTF-8 tag */ - type_id = H5Tcreate(H5T_OPAQUE, (size_t)4); - CHECK(type_id, FAIL, "H5Tcreate"); - ret = H5Tset_tag(type_id, string); - CHECK(ret, FAIL, "H5Tset_tag"); - - /* Read the tag back. */ - read_buf = H5Tget_tag(type_id); - ret = strcmp(read_buf, string); - VERIFY(ret, 0, "H5Tget_tag"); - H5free_memory(read_buf); - - ret = H5Tclose(type_id); - CHECK(ret, FAIL, "H5Tclose"); -} - -/*********************/ -/* Utility functions */ -/*********************/ - -/* mkstr - * Borrwed from dtypes.c. - * Creates a new string data type. Used in string padding tests */ -static hid_t -mkstr(size_t len, H5T_str_t strpad) -{ - hid_t t; - if ((t = H5Tcopy(H5T_C_S1)) < 0) - return -1; - if (H5Tset_size(t, len) < 0) - return -1; - if (H5Tset_strpad(t, strpad) < 0) - return -1; - return t; -} - -/* write_char - * Append a unicode code point c to test_string in UTF-8 encoding. - * Return the new end of the string. - */ -unsigned int -write_char(unsigned int c, char *test_string, unsigned int cur_pos) -{ - if (c < 0x80) { - test_string[cur_pos] = (char)c; - cur_pos++; - } - else if (c < 0x800) { - test_string[cur_pos] = (char)(0xC0 | c >> 6); - test_string[cur_pos + 1] = (char)(0x80 | (c & 0x3F)); - cur_pos += 2; - } - else if (c < 0x10000) { - test_string[cur_pos] = (char)(0xE0 | c >> 12); - test_string[cur_pos + 1] = (char)(0x80 | (c >> 6 & 0x3F)); - test_string[cur_pos + 2] = (char)(0x80 | (c & 0x3F)); - cur_pos += 3; - } - else if (c < 0x200000) { - test_string[cur_pos] = (char)(0xF0 | c >> 18); - test_string[cur_pos + 1] = (char)(0x80 | (c >> 12 & 0x3F)); - test_string[cur_pos + 2] = (char)(0x80 | (c >> 6 & 0x3F)); - test_string[cur_pos + 3] = (char)(0x80 | (c & 0x3F)); - cur_pos += 4; - } - - return cur_pos; -} - -/* dump_string - * Print a string both as text (which will look like garbage) and as hex. - * The text display is not guaranteed to be accurate--certain characters - * could confuse printf (e.g., '\n'). */ -void -dump_string(const char *string) -{ - size_t length; - size_t x; - - printf("The string was:\n %s", string); - printf("Or in hex:\n"); - - length = strlen(string); - - for (x = 0; x < length; x++) - printf("%x ", string[x] & (0x000000FF)); - - printf("\n"); -} - -/* Main test. - * Create a string of random Unicode characters, then run each test with - * that string. 
- */ -void -test_unicode(void) -{ - char test_string[MAX_STRING_LENGTH]; - unsigned int cur_pos = 0; /* Current position in test_string */ - unsigned int unicode_point; /* Unicode code point for a single character */ - hid_t fid; /* ID of file */ - int x; /* Temporary variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing UTF-8 Encoding\n")); - - /* Create a random string with length NUM_CHARS */ - HDsrandom((unsigned)HDtime(NULL)); - - memset(test_string, 0, sizeof(test_string)); - for (x = 0; x < NUM_CHARS; x++) { - /* We need to avoid unprintable characters (codes 0-31) and the - * . and / characters, since they aren't allowed in path names. - */ - unicode_point = (unsigned)(HDrandom() % (MAX_CODE_POINT - 32)) + 32; - if (unicode_point != 46 && unicode_point != 47) - cur_pos = write_char(unicode_point, test_string, cur_pos); - } - - /* Avoid unlikely case of the null string */ - if (cur_pos == 0) { - test_string[cur_pos] = 'Q'; - cur_pos++; - } - test_string[cur_pos] = '\0'; - - /* Create file */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - - test_fl_string(fid, test_string); - test_strpad(fid, "abcdefgh"); - test_strpad(fid, test_string); - test_vl_string(fid, test_string); - test_objnames(fid, test_string); - test_attrname(fid, test_string); - test_compound(fid, test_string); - test_enum(fid, test_string); - test_opaque(fid, test_string); - - /* Close file */ - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* This function could be useful in debugging if certain strings - * create errors. - */ -#ifdef DEBUG - dump_string(test_string); -#endif /* DEBUG */ -} - -/* cleanup_unicode(void) - * Delete the file this test created. - */ -void -cleanup_unicode(void) -{ - H5Fdelete(FILENAME, H5P_DEFAULT); -} diff --git a/test/API/tvlstr.c b/test/API/tvlstr.c deleted file mode 100644 index a9375c8e865..00000000000 --- a/test/API/tvlstr.c +++ /dev/null @@ -1,1004 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tvlstr - * - * Test the Variable-Length String functionality - * - *************************************************************/ - -#include "testhdf5.h" - -#define DATAFILE "tvlstr.h5" -#define DATAFILE2 "tvlstr2.h5" -#define DATAFILE3 "sel2el.h5" - -#define DATASET "1Darray" - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 -#define NUMP 4 - -#define VLSTR_TYPE "vl_string_type" - -/* Definitions for the VL re-writing test */ -#define REWRITE_NDATASETS 32 - -/* String for testing attributes */ -static const char *string_att = "This is the string for the attribute"; -static char *string_att_write = NULL; - -void *test_vlstr_alloc_custom(size_t size, void *info); -void test_vlstr_free_custom(void *mem, void *info); - -/**************************************************************** -** -** test_vlstr_alloc_custom(): Test VL datatype custom memory -** allocation routines. This routine just uses malloc to -** allocate the memory and increments the amount of memory -** allocated. -** -****************************************************************/ -void * -test_vlstr_alloc_custom(size_t size, void *info) -{ - void *ret_value = NULL; /* Pointer to return */ - size_t *mem_used = (size_t *)info; /* Get the pointer to the memory used */ - size_t extra; /* Extra space needed */ - - /* - * This weird contortion is required on the DEC Alpha to keep the - * alignment correct - QAK - */ - extra = MAX(sizeof(void *), sizeof(size_t)); - - if ((ret_value = malloc(extra + size)) != NULL) { - *(size_t *)ret_value = size; - *mem_used += size; - } /* end if */ - ret_value = ((unsigned char *)ret_value) + extra; - return (ret_value); -} - -/**************************************************************** -** -** test_vlstr_free_custom(): Test VL datatype custom memory -** allocation routines. This routine just uses free to -** release the memory and decrements the amount of memory -** allocated. -** -****************************************************************/ -void -test_vlstr_free_custom(void *_mem, void *info) -{ - unsigned char *mem; - size_t *mem_used = (size_t *)info; /* Get the pointer to the memory used */ - size_t extra; /* Extra space needed */ - - /* - * This weird contortion is required on the DEC Alpha to keep the - * alignment correct - QAK - */ - extra = MAX(sizeof(void *), sizeof(size_t)); - - if (_mem != NULL) { - mem = ((unsigned char *)_mem) - extra; - *mem_used -= *(size_t *)((void *)mem); - free(mem); - } /* end if */ -} - -/**************************************************************** -** -** test_vlstrings_basic(): Test basic VL string code. 
-** Tests simple VL string I/O -** -****************************************************************/ -static void -test_vlstrings_basic(void) -{ - /* Information to write */ - const char *wdata[SPACE1_DIM1] = { - "Four score and seven years ago our forefathers brought forth on this continent a new nation,", - "conceived in liberty and dedicated to the proposition that all men are created equal.", - "Now we are engaged in a great civil war,", - "testing whether that nation or any nation so conceived and so dedicated can long endure."}; - - char *rdata[SPACE1_DIM1]; /* Information read in */ - char *wdata2; - hid_t dataspace, dataset2; - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t size; /* Number of bytes which will be used */ - unsigned i; /* counting variable */ - size_t str_used; /* String data in memory */ - size_t mem_used = 0; /* Memory used during allocation */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic VL String Functionality\n")); - - /* Create file */ - fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid1 = H5Tcopy(H5T_C_S1); - CHECK(tid1, FAIL, "H5Tcopy"); - - ret = H5Tset_size(tid1, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - dataspace = H5Screate(H5S_SCALAR); - - dataset2 = H5Dcreate2(fid1, "Dataset2", tid1, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - wdata2 = (char *)calloc((size_t)65534, sizeof(char)); - memset(wdata2, 'A', (size_t)65533); - - ret = H5Dwrite(dataset2, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata2); - CHECK(ret, FAIL, "H5Dwrite"); - - H5Sclose(dataspace); - H5Dclose(dataset2); - free(wdata2); - - /* Change to the custom memory allocation routines for reading VL string */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vlstr_alloc_custom, &mem_used, test_vlstr_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* Count the actual number of bytes used by the strings */ - for (i = 0, str_used = 0; i < SPACE1_DIM1; i++) - str_used += strlen(wdata[i]) + 1; - - /* Compare against the strings actually written */ - VERIFY(size, (hsize_t)str_used, "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - VERIFY(mem_used, str_used, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (strlen(wdata[i]) != strlen(rdata[i])) { - TestErrPrintf("VL data length don't match!, 
strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i, - (int)strlen(wdata[i]), (int)i, (int)strlen(rdata[i])); - continue; - } /* end if */ - if (strcmp(wdata[i], rdata[i]) != 0) { - TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i], - (int)i, rdata[i]); - continue; - } /* end if */ - } /* end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_vlstrings_basic() */ - -/**************************************************************** -** -** test_vlstrings_special(): Test VL string code for special -** string cases, nil and zero-sized. -** -****************************************************************/ -static void -test_vlstrings_special(void) -{ - const char *wdata[SPACE1_DIM1] = {"", "two", "three", "\0"}; - const char *wdata2[SPACE1_DIM1] = {NULL, NULL, NULL, NULL}; - char *rdata[SPACE1_DIM1]; /* Information read in */ - char *fill; /* Fill value */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hid_t dcpl; /* Dataset creation property list ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - unsigned i; /* counting variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Special VL Strings\n")); - - /* Create file */ - fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid1 = H5Tcopy(H5T_C_S1); - CHECK(tid1, FAIL, "H5Tcopy"); - - ret = H5Tset_size(tid1, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Read from dataset before writing data */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i] != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (strlen(wdata[i]) != strlen(rdata[i])) { - TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i, - (int)strlen(wdata[i]), (int)i, (int)strlen(rdata[i])); - continue; - } /* end if */ - if ((wdata[i] == NULL && rdata[i] != NULL) || (rdata[i] == NULL && wdata[i] != NULL)) { - TestErrPrintf("VL data values don't match!\n"); - 
continue; - } /* end if */ - if (strcmp(wdata[i], rdata[i]) != 0) { - TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i], - (int)i, rdata[i]); - continue; - } /* end if */ - } /* end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create another dataset to test nil strings */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set the fill value for the second dataset */ - fill = NULL; - ret = H5Pset_fill_value(dcpl, tid1, &fill); - CHECK(ret, FAIL, "H5Pset_fill_value"); - - dataset = H5Dcreate2(fid1, "Dataset4", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Read from dataset before writing data */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i] != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]); - - /* Try to write nil strings to disk. */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read nil strings back from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i] != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} - -/**************************************************************** -** -** test_vlstring_type(): Test VL string type. -** Tests if VL string is treated as string. 
-** -****************************************************************/ -static void -test_vlstring_type(void) -{ - hid_t fid; /* HDF5 File IDs */ - hid_t tid_vlstr; - H5T_cset_t cset; - H5T_str_t pad; - htri_t vl_str; /* Whether string is VL */ - herr_t ret; - - /* Output message about test being performed */ - MESSAGE(5, ("Testing VL String type\n")); - - /* Open file */ - fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Create a datatype to refer to */ - tid_vlstr = H5Tcopy(H5T_C_S1); - CHECK(tid_vlstr, FAIL, "H5Tcopy"); - - /* Change padding and verify it */ - ret = H5Tset_strpad(tid_vlstr, H5T_STR_NULLPAD); - CHECK(ret, FAIL, "H5Tset_strpad"); - pad = H5Tget_strpad(tid_vlstr); - VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad"); - - /* Convert to variable-length string */ - ret = H5Tset_size(tid_vlstr, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Check if datatype is VL string */ - ret = H5Tget_class(tid_vlstr); - VERIFY(ret, H5T_STRING, "H5Tget_class"); - ret = H5Tis_variable_str(tid_vlstr); - VERIFY(ret, true, "H5Tis_variable_str"); - - /* Verify that the class detects as a string */ - vl_str = H5Tdetect_class(tid_vlstr, H5T_STRING); - CHECK(vl_str, FAIL, "H5Tdetect_class"); - VERIFY(vl_str, true, "H5Tdetect_class"); - - /* Check default character set and padding */ - cset = H5Tget_cset(tid_vlstr); - VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset"); - pad = H5Tget_strpad(tid_vlstr); - VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad"); - - /* Commit variable-length string datatype to storage */ - ret = H5Tcommit2(fid, VLSTR_TYPE, tid_vlstr, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Tcommit2"); - - /* Close datatype */ - ret = H5Tclose(tid_vlstr); - CHECK(ret, FAIL, "H5Tclose"); - - tid_vlstr = H5Topen2(fid, VLSTR_TYPE, H5P_DEFAULT); - CHECK(tid_vlstr, FAIL, "H5Topen2"); - - ret = H5Tclose(tid_vlstr); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fopen"); - - /* Open the variable-length string datatype just created */ - tid_vlstr = H5Topen2(fid, VLSTR_TYPE, H5P_DEFAULT); - CHECK(tid_vlstr, FAIL, "H5Topen2"); - - /* Verify character set and padding */ - cset = H5Tget_cset(tid_vlstr); - VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset"); - pad = H5Tget_strpad(tid_vlstr); - VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad"); - - /* Close datatype and file */ - ret = H5Tclose(tid_vlstr); - CHECK(ret, FAIL, "H5Tclose"); - ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_vlstring_type() */ - -/**************************************************************** -** -** test_compact_vlstring(): Test code for storing VL strings in -** compact datasets. 
-** -****************************************************************/ -static void -test_compact_vlstring(void) -{ - const char *wdata[SPACE1_DIM1] = {"one", "two", "three", "four"}; - char *rdata[SPACE1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hid_t plist; /* Dataset creation property list */ - hsize_t dims1[] = {SPACE1_DIM1}; - unsigned i; /* counting variable */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing VL Strings in compact dataset\n")); - - /* Create file */ - fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid1 = H5Tcopy(H5T_C_S1); - CHECK(tid1, FAIL, "H5Tcopy"); - - ret = H5Tset_size(tid1, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - plist = H5Pcreate(H5P_DATASET_CREATE); - CHECK(plist, FAIL, "H5Pcreate"); - - ret = H5Pset_layout(plist, H5D_COMPACT); - CHECK(ret, FAIL, "H5Pset_layout"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset5", tid1, sid1, H5P_DEFAULT, plist, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (strlen(wdata[i]) != strlen(rdata[i])) { - TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i, - (int)strlen(wdata[i]), (int)i, (int)strlen(rdata[i])); - continue; - } /* end if */ - if (strcmp(wdata[i], rdata[i]) != 0) { - TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i], - (int)i, rdata[i]); - continue; - } /* end if */ - } /* end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset create property list */ - ret = H5Pclose(plist); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /*test_compact_vlstrings*/ - -/**************************************************************** -** -** test_write_vl_string_attribute(): Test basic VL string code. -** Tests writing VL strings as attributes -** -****************************************************************/ -static void -test_write_vl_string_attribute(void) -{ - hid_t file, root, dataspace, att; - hid_t type; - herr_t ret; - char *string_att_check = NULL; - - /* Open the file */ - file = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - - /* Create a datatype to refer to. 
*/ - type = H5Tcopy(H5T_C_S1); - CHECK(type, FAIL, "H5Tcopy"); - - ret = H5Tset_size(type, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - root = H5Gopen2(file, "/", H5P_DEFAULT); - CHECK(root, FAIL, "H5Gopen2"); - - dataspace = H5Screate(H5S_SCALAR); - CHECK(dataspace, FAIL, "H5Screate"); - - /* Test creating a "normal" sized string attribute */ - att = H5Acreate2(root, "test_scalar", type, dataspace, H5P_DEFAULT, H5P_DEFAULT); - CHECK(att, FAIL, "H5Acreate2"); - - ret = H5Awrite(att, type, &string_att); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Aread(att, type, &string_att_check); - CHECK(ret, FAIL, "H5Aread"); - - if (strcmp(string_att_check, string_att) != 0) - TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n", string_att, - string_att_check); - - H5free_memory(string_att_check); - string_att_check = NULL; - - ret = H5Aclose(att); - CHECK(ret, FAIL, "HAclose"); - - /* Test creating a "large" sized string attribute */ - att = H5Acreate2(root, "test_scalar_large", type, dataspace, H5P_DEFAULT, H5P_DEFAULT); - CHECK(att, FAIL, "H5Acreate2"); - - string_att_write = (char *)calloc((size_t)8192, sizeof(char)); - memset(string_att_write, 'A', (size_t)8191); - - ret = H5Awrite(att, type, &string_att_write); - CHECK(ret, FAIL, "H5Awrite"); - - ret = H5Aread(att, type, &string_att_check); - CHECK(ret, FAIL, "H5Aread"); - - if (strcmp(string_att_check, string_att_write) != 0) - TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n", - string_att_write, string_att_check); - - H5free_memory(string_att_check); - string_att_check = NULL; - - /* The attribute string written is freed below, in the test_read_vl_string_attribute() test */ - /* free(string_att_write); */ - - ret = H5Aclose(att); - CHECK(ret, FAIL, "HAclose"); - - ret = H5Gclose(root); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Sclose(dataspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} - -/**************************************************************** -** -** test_read_vl_string_attribute(): Test basic VL string code. -** Tests reading VL strings from attributes -** -****************************************************************/ -static void -test_read_vl_string_attribute(void) -{ - hid_t file, root, att; - hid_t type; - herr_t ret; - char *string_att_check = NULL; - - /* Open file */ - file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file, FAIL, "H5Fopen"); - - /* Create a datatype to refer to. 
*/ - type = H5Tcopy(H5T_C_S1); - CHECK(type, FAIL, "H5Tcopy"); - - ret = H5Tset_size(type, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - root = H5Gopen2(file, "/", H5P_DEFAULT); - CHECK(root, FAIL, "H5Gopen2"); - - /* Test reading "normal" sized string attribute */ - att = H5Aopen(root, "test_scalar", H5P_DEFAULT); - CHECK(att, FAIL, "H5Aopen"); - - ret = H5Aread(att, type, &string_att_check); - CHECK(ret, FAIL, "H5Aread"); - - if (strcmp(string_att_check, string_att) != 0) - TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n", string_att, - string_att_check); - - H5free_memory(string_att_check); - string_att_check = NULL; - - ret = H5Aclose(att); - CHECK(ret, FAIL, "HAclose"); - - /* Test reading "large" sized string attribute */ - att = H5Aopen(root, "test_scalar_large", H5P_DEFAULT); - CHECK(att, FAIL, "H5Aopen"); - - if (string_att_write) { - ret = H5Aread(att, type, &string_att_check); - CHECK(ret, FAIL, "H5Aread"); - - if (strcmp(string_att_check, string_att_write) != 0) - TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n", - string_att_write, string_att_check); - - H5free_memory(string_att_check); - string_att_check = NULL; - } - - /* Free string allocated in test_write_vl_string_attribute */ - if (string_att_write) - free(string_att_write); - - ret = H5Aclose(att); - CHECK(ret, FAIL, "HAclose"); - - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Gclose(root); - CHECK(ret, FAIL, "H5Gclose"); - - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); -} - -/* Helper routine for test_vl_rewrite() */ -static void -write_scalar_dset(hid_t file, hid_t type, hid_t space, char *name, char *data) -{ - hid_t dset; - herr_t ret; - - dset = H5Dcreate2(file, name, type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); - - ret = H5Dwrite(dset, type, space, space, H5P_DEFAULT, &data); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); -} - -/* Helper routine for test_vl_rewrite() */ -static void -read_scalar_dset(hid_t file, hid_t type, hid_t space, char *name, char *data) -{ - hid_t dset; - herr_t ret; - char *data_read; - - dset = H5Dopen2(file, name, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dopen2"); - - ret = H5Dread(dset, type, space, space, H5P_DEFAULT, &data_read); - CHECK(ret, FAIL, "H5Dread"); - - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - - if (strcmp(data, data_read) != 0) - TestErrPrintf("Expected %s for dataset %s but read %s\n", data, name, data_read); - - ret = H5Treclaim(type, space, H5P_DEFAULT, &data_read); - CHECK(ret, FAIL, "H5Treclaim"); -} - -/**************************************************************** -** -** test_vl_rewrite(): Test basic VL string code. -** Tests I/O on VL strings when lots of objects in the file -** have been linked/unlinked. 
-** -****************************************************************/ -static void -test_vl_rewrite(void) -{ - hid_t file1, file2; /* File IDs */ - hid_t type; /* VL string datatype ID */ - hid_t space; /* Scalar dataspace */ - char name[256]; /* Buffer for names & data */ - int i; /* Local index variable */ - herr_t ret; /* Generic return value */ - - /* Create the VL string datatype */ - type = H5Tcopy(H5T_C_S1); - CHECK(type, FAIL, "H5Tcopy"); - - ret = H5Tset_size(type, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create the scalar dataspace */ - space = H5Screate(H5S_SCALAR); - CHECK(space, FAIL, "H5Screate"); - - /* Open the files */ - file1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1, FAIL, "H5Fcreate"); - - file2 = H5Fcreate(DATAFILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1, FAIL, "H5Fcreate"); - - /* Create in file 1 */ - for (i = 0; i < REWRITE_NDATASETS; i++) { - snprintf(name, sizeof(name), "/set_%d", i); - write_scalar_dset(file1, type, space, name, name); - } - - /* Effectively copy data from file 1 to 2 */ - for (i = 0; i < REWRITE_NDATASETS; i++) { - snprintf(name, sizeof(name), "/set_%d", i); - read_scalar_dset(file1, type, space, name, name); - write_scalar_dset(file2, type, space, name, name); - } - - /* Read back from file 2 */ - for (i = 0; i < REWRITE_NDATASETS; i++) { - snprintf(name, sizeof(name), "/set_%d", i); - read_scalar_dset(file2, type, space, name, name); - } /* end for */ - - /* Remove from file 2. */ - for (i = 0; i < REWRITE_NDATASETS; i++) { - snprintf(name, sizeof(name), "/set_%d", i); - ret = H5Ldelete(file2, name, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end for */ - - /* Effectively copy from file 1 to file 2 */ - for (i = 0; i < REWRITE_NDATASETS; i++) { - snprintf(name, sizeof(name), "/set_%d", i); - read_scalar_dset(file1, type, space, name, name); - write_scalar_dset(file2, type, space, name, name); - } /* end for */ - - /* Close everything */ - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Sclose(space); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file1); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Fclose(file2); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_vl_rewrite() */ - -/**************************************************************** - ** - ** test_write_same_element(): - ** Tests writing to the same element of VL string using - ** H5Sselect_element. 
- ** - ****************************************************************/ -static void -test_write_same_element(void) -{ - hid_t file1, dataset1; - hid_t mspace, fspace, dtype; - hsize_t fdim[] = {SPACE1_DIM1}; - const char *wdata[SPACE1_DIM1] = {"Parting", "is such a", "sweet", "sorrow."}; - const char *val[SPACE1_DIM1] = {"But", "reuniting", "is a", "great joy"}; - hsize_t marray[] = {NUMP}; - hsize_t coord[SPACE1_RANK][NUMP]; - herr_t ret; - - MESSAGE(5, ("Testing writing to same element of VL string dataset twice\n")); - - if ((vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) && (vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { - file1 = H5Fcreate(DATAFILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file1, FAIL, "H5Fcreate"); - - dtype = H5Tcopy(H5T_C_S1); - CHECK(dtype, FAIL, "H5Tcopy"); - - ret = H5Tset_size(dtype, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - fspace = H5Screate_simple(SPACE1_RANK, fdim, NULL); - CHECK(fspace, FAIL, "H5Screate_simple"); - - dataset1 = H5Dcreate2(file1, DATASET, dtype, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset1, FAIL, "H5Dcreate"); - - ret = H5Dwrite(dataset1, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dclose(dataset1); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Sclose(fspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file1); - CHECK(ret, FAIL, "H5Fclose"); - - /* - * Open the file. Select the same points, write values to those point locations. - */ - file1 = H5Fopen(DATAFILE3, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file1, FAIL, "H5Fopen"); - - dataset1 = H5Dopen2(file1, DATASET, H5P_DEFAULT); - CHECK(dataset1, FAIL, "H5Dopen"); - - fspace = H5Dget_space(dataset1); - CHECK(fspace, FAIL, "H5Dget_space"); - - dtype = H5Dget_type(dataset1); - CHECK(dtype, FAIL, "H5Dget_type"); - - mspace = H5Screate_simple(1, marray, NULL); - CHECK(mspace, FAIL, "H5Screate_simple"); - - coord[0][0] = 0; - coord[0][1] = 2; - coord[0][2] = 2; - coord[0][3] = 0; - - ret = H5Sselect_elements(fspace, H5S_SELECT_SET, NUMP, (const hsize_t *)&coord); - CHECK(ret, FAIL, "H5Sselect_elements"); - - ret = H5Dwrite(dataset1, dtype, mspace, fspace, H5P_DEFAULT, val); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Tclose(dtype); - CHECK(ret, FAIL, "H5Tclose"); - - ret = H5Dclose(dataset1); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(fspace); - CHECK(ret, FAIL, "H5Dclose"); - - ret = H5Sclose(mspace); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Fclose(file1); - CHECK(ret, FAIL, "H5Fclose"); - } -} /* test_write_same_element */ - -/**************************************************************** -** -** test_vlstrings(): Main VL string testing routine. 
-** -****************************************************************/ -void -test_vlstrings(void) -{ - /* Output message about test being performed */ - MESSAGE(5, ("Testing Variable-Length Strings\n")); - - /* These tests use the same file */ - /* Test basic VL string datatype */ - test_vlstrings_basic(); - test_vlstrings_special(); - test_vlstring_type(); - test_compact_vlstring(); - - /* Test using VL strings in attributes */ - test_write_vl_string_attribute(); - test_read_vl_string_attribute(); - - /* Test writing VL datasets in files with lots of unlinking */ - test_vl_rewrite(); - /* Test writing to the same element more than once using H5Sselect_elements */ - test_write_same_element(); -} /* test_vlstrings() */ - -/*------------------------------------------------------------------------- - * Function: cleanup_vlstrings - * - * Purpose: Cleanup temporary test files - * - * Return: none - *------------------------------------------------------------------------- - */ -void -cleanup_vlstrings(void) -{ - H5Fdelete(DATAFILE, H5P_DEFAULT); - H5Fdelete(DATAFILE2, H5P_DEFAULT); - H5Fdelete(DATAFILE3, H5P_DEFAULT); -} diff --git a/test/API/tvltypes.c b/test/API/tvltypes.c deleted file mode 100644 index 9e6027cd935..00000000000 --- a/test/API/tvltypes.c +++ /dev/null @@ -1,3258 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/*********************************************************** - * - * Test program: tvltypes - * - * Test the Variable-Length Datatype functionality - * - *************************************************************/ - -#include "testhdf5.h" - -/* #include "H5Dprivate.h" */ - -#define FILENAME "tvltypes.h5" - -/* 1-D dataset with fixed dimensions */ -#define SPACE1_RANK 1 -#define SPACE1_DIM1 4 - -/* 1-D dataset with fixed dimensions */ -#define SPACE3_RANK 1 -#define SPACE3_DIM1 128 -#define L1_INCM 16 -#define L2_INCM 8 -#define L3_INCM 3 - -/* Default temporary buffer size - Pulled from H5Dprivate.h */ -#define H5D_TEMP_BUF_SIZE (1024 * 1024) - -/* 1-D dataset with fixed dimensions */ -#define SPACE4_RANK 1 -#define SPACE4_DIM_SMALL 128 -#define SPACE4_DIM_LARGE (H5D_TEMP_BUF_SIZE / 64) - -void *test_vltypes_alloc_custom(size_t size, void *info); -void test_vltypes_free_custom(void *mem, void *info); - -/**************************************************************** -** -** test_vltypes_alloc_custom(): Test VL datatype custom memory -** allocation routines. This routine just uses malloc to -** allocate the memory and increments the amount of memory -** allocated. 
-** -****************************************************************/ -void * -test_vltypes_alloc_custom(size_t size, void *mem_used) -{ - void *ret_value; /* Pointer to return */ - const size_t extra = MAX(sizeof(void *), sizeof(size_t)); /* Extra space needed */ - /* (This weird contortion is required on the - * DEC Alpha to keep the alignment correct - QAK) - */ - - if ((ret_value = malloc(extra + size)) != NULL) { - *(size_t *)ret_value = size; - *(size_t *)mem_used += size; - } /* end if */ - - ret_value = ((unsigned char *)ret_value) + extra; - - return (ret_value); -} - -/**************************************************************** -** -** test_vltypes_free_custom(): Test VL datatype custom memory -** allocation routines. This routine just uses free to -** release the memory and decrements the amount of memory -** allocated. -** -****************************************************************/ -void -test_vltypes_free_custom(void *_mem, void *mem_used) -{ - if (_mem) { - const size_t extra = MAX(sizeof(void *), sizeof(size_t)); /* Extra space needed */ - /* (This weird contortion is required - * on the DEC Alpha to keep the - * alignment correct - QAK) - */ - unsigned char *mem = ((unsigned char *)_mem) - extra; /* Pointer to actual block allocated */ - - *(size_t *)mem_used -= *(size_t *)((void *)mem); - free(mem); - } /* end if */ -} - -/**************************************************************** -** -** test_vltypes_data_create(): Dataset of VL is supposed to -** fail when fill value is never written to dataset. -** -****************************************************************/ -static void -test_vltypes_dataset_create(void) -{ - hid_t fid1; /* HDF5 File IDs */ - hid_t dcpl; /* Dataset Property list */ - hid_t dataset; /* Dataset ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Dataset of VL Datatype Functionality\n")); - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid1 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create dataset property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); - - /* Set fill value writing time to be NEVER */ - ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_NEVER); - CHECK(ret, FAIL, "H5Pset_fill_time"); - - /* Create a dataset, supposed to fail */ - H5E_BEGIN_TRY - { - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT); - } - H5E_END_TRY - VERIFY(dataset, FAIL, "H5Dcreate2"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} - -/**************************************************************** -** -** test_vltypes_funcs(): Test some type functions that are and -** aren't supposed to work with VL type. 
-** -****************************************************************/ -static void -test_vltypes_funcs(void) -{ - hid_t type; /* Datatype ID */ - size_t size; - H5T_pad_t inpad; - H5T_norm_t norm; - H5T_cset_t cset; - H5T_str_t strpad; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing some type functions for VL\n")); - - /* Create a datatype to refer to */ - type = H5Tvlen_create(H5T_IEEE_F32BE); - CHECK(type, FAIL, "H5Tvlen_create"); - - size = H5Tget_precision(type); - CHECK(size, 0, "H5Tget_precision"); - - size = H5Tget_size(type); - CHECK(size, 0, "H5Tget_size"); - - size = H5Tget_ebias(type); - CHECK(size, 0, "H5Tget_ebias"); - - ret = H5Tset_pad(type, H5T_PAD_ZERO, H5T_PAD_ONE); - CHECK(ret, FAIL, "H5Tset_pad"); - - inpad = H5Tget_inpad(type); - CHECK(inpad, FAIL, "H5Tget_inpad"); - - norm = H5Tget_norm(type); - CHECK(norm, FAIL, "H5Tget_norm"); - - ret = H5Tset_offset(type, (size_t)16); - CHECK(ret, FAIL, "H5Tset_offset"); - - H5E_BEGIN_TRY - { - cset = H5Tget_cset(type); - } - H5E_END_TRY - VERIFY(cset, FAIL, "H5Tget_cset"); - - H5E_BEGIN_TRY - { - strpad = H5Tget_strpad(type); - } - H5E_END_TRY - VERIFY(strpad, FAIL, "H5Tget_strpad"); - - /* Close datatype */ - ret = H5Tclose(type); - CHECK(ret, FAIL, "H5Tclose"); -} - -/**************************************************************** -** -** test_vltypes_vlen_atomic(): Test basic VL datatype code. -** Tests VL datatypes of atomic datatypes -** -****************************************************************/ -static void -test_vltypes_vlen_atomic(void) -{ - hvl_t wdata[SPACE1_DIM1]; /* Information to write */ - hvl_t wdata2[SPACE1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1]; /* Information read in */ - hvl_t fill; /* Fill value */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t sid2; /* ID of bad dataspace (no extent set) */ - hid_t tid1; /* Datatype ID */ - hid_t dcpl_pid; /* Dataset creation property list ID */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Atomic VL Datatype Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].p = malloc((i + 1) * sizeof(unsigned int)); - wdata[i].len = i + 1; - for (j = 0; j < (i + 1); j++) - ((unsigned int *)wdata[i].p)[j] = i * 10 + j; - - wdata2[i].p = NULL; - wdata2[i].len = 0; - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a datatype to refer to */ - tid1 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Read from dataset before writing data */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i].len != 0 
|| rdata[i].p != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, - (unsigned)rdata[i].len, (int)i, rdata[i].p); - - /* Write "nil" data to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read from dataset with "nil" data */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i].len != 0 || rdata[i].p != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, - (unsigned)rdata[i].len, (int)i, rdata[i].p); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create second dataset, with fill value */ - dcpl_pid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl_pid, FAIL, "H5Pcreate"); - - /* Set the fill value for the second dataset */ - fill.p = NULL; - fill.len = 0; - ret = H5Pset_fill_value(dcpl_pid, tid1, &fill); - CHECK(ret, FAIL, "H5Pset_fill_value"); - - /* Create a second dataset */ - dataset = H5Dcreate2(fid1, "Dataset2", tid1, sid1, H5P_DEFAULT, dcpl_pid, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Read from dataset before writing data */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i].len != 0 || rdata[i].p != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, - (unsigned)rdata[i].len, (int)i, rdata[i].p); - - /* Write "nil" data to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read from dataset with "nil" data */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i].len != 0 || rdata[i].p != NULL) - TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i, - (unsigned)rdata[i].len, (int)i, rdata[i].p); - - /* Write data to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file for data checking */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get datatype for dataset */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, 
test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].len; j++) { - if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) { - TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i, - (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j, - (int)((unsigned int *)rdata[i].p)[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Open second dataset */ - dataset = H5Dopen2(fid1, "Dataset2", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get datatype for dataset */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Create a "bad" dataspace with no extent set */ - sid2 = H5Screate(H5S_SIMPLE); - CHECK(sid2, FAIL, "H5Screate"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size"); - - /* Try to call H5Dvlen_get_buf with bad dataspace */ - H5E_BEGIN_TRY - { - ret = H5Dvlen_get_buf_size(dataset, tid1, sid2, &size); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory 
has been used */ - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].len; j++) { - if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) { - TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i, - (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j, - (int)((unsigned int *)rdata[i].p)[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Try to reclaim read data using "bad" dataspace with no extent - * Should fail */ - H5E_BEGIN_TRY - { - ret = H5Treclaim(tid1, sid2, xfer_pid, rdata); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Treclaim"); - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_vltypes_vlen_atomic() */ - -/**************************************************************** -** -** rewrite_vltypes_vlen_atomic(): check memory leak for basic VL datatype. 
-** Check memory leak for VL datatypes of atomic datatypes -** -****************************************************************/ -static void -rewrite_vltypes_vlen_atomic(void) -{ - hvl_t wdata[SPACE1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1; /* Datatype ID */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - unsigned increment = 4; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Check Memory Leak for Basic Atomic VL Datatype Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].p = malloc((i + increment) * sizeof(unsigned int)); - wdata[i].len = i + increment; - for (j = 0; j < (i + increment); j++) - ((unsigned int *)wdata[i].p)[j] = i * 20 + j; - } /* end for */ - - /* Open file created in test_vltypes_vlen_atomic() */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset created in test_vltypes_vlen_atomic() */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Open dataspace for dataset */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get datatype for dataset */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file for data checking */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get datatype for dataset */ - tid1 = H5Dget_type(dataset); - CHECK(tid1, FAIL, "H5Dget_type"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 22 elements allocated = 4+5+6+7 elements for each array position */ - VERIFY(size, 22 * sizeof(unsigned int), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 22 elements allocated = 4+5+6+7 elements for each array position */ - VERIFY(mem_used, 22 * sizeof(unsigned int), "H5Dread"); - - /* Compare data read in */ - for 
(i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].len; j++) { - if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) { - TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i, - (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j, - (int)((unsigned int *)rdata[i].p)[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the read VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end rewrite_vltypes_vlen_atomic() */ - -/**************************************************************** -** -** test_vltypes_vlen_compound(): Test basic VL datatype code. -** Test VL datatypes of compound datatypes -** -****************************************************************/ -static void -test_vltypes_vlen_compound(void) -{ - typedef struct { /* Struct that the VL sequences are composed of */ - int i; - float f; - } s1; - hvl_t wdata[SPACE1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1, tid2; /* Datatype IDs */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Basic Compound VL Datatype Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].p = malloc((i + 1) * sizeof(s1)); - wdata[i].len = i + 1; - for (j = 0; j < (i + 1); j++) { - ((s1 *)wdata[i].p)[j].i = (int)(i * 10 + j); - ((s1 *)wdata[i].p)[j].f = (float)(i * 20 + j) / 3.0F; - } /* end for */ - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create the base compound type */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create a datatype to refer to */ - tid1 = H5Tvlen_create(tid2); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* 
Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(s1), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(s1), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].len; j++) { - if (((s1 *)wdata[i].p)[j].i != ((s1 *)rdata[i].p)[j].i) { - TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].i=%d, rdata[%d].p[%d].i=%d\n", - (int)i, (int)j, (int)((s1 *)wdata[i].p)[j].i, (int)i, (int)j, - (int)((s1 *)rdata[i].p)[j].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(((s1 *)wdata[i].p)[j].f, ((s1 *)rdata[i].p)[j].f)) { - TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].f=%f, rdata[%d].p[%d].f=%f\n", - (int)i, (int)j, (double)((s1 *)wdata[i].p)[j].f, (int)i, (int)j, - (double)((s1 *)rdata[i].p)[j].f); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_vltypes_vlen_compound() */ - -/**************************************************************** -** -** rewrite_vltypes_vlen_compound(): Check memory leak for basic VL datatype. 
-** Checks memory leak for VL datatypes of compound datatypes -** -****************************************************************/ -static void -rewrite_vltypes_vlen_compound(void) -{ - typedef struct { /* Struct that the VL sequences are composed of */ - int i; - float f; - } s1; - hvl_t wdata[SPACE1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1, tid2; /* Datatype IDs */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - unsigned increment = 4; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Check Memory Leak for Basic Compound VL Datatype Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].p = malloc((i + increment) * sizeof(s1)); - wdata[i].len = i + increment; - for (j = 0; j < (i + increment); j++) { - ((s1 *)wdata[i].p)[j].i = (int)(i * 40 + j); - ((s1 *)wdata[i].p)[j].f = (float)(i * 60 + j) / 3.0F; - } /* end for */ - } /* end for */ - - /* Create file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Create the base compound type */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - CHECK(tid2, FAIL, "H5Tcreate"); - - ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create a datatype to refer to */ - tid1 = H5Tvlen_create(tid2); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Create dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 22 elements allocated = 4 + 5 + 6 + 7 elements for each array position */ - VERIFY(size, 22 * sizeof(s1), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 22 elements allocated = 4 + 5 + 6 + 7 elements for each array position */ - VERIFY(mem_used, 22 * sizeof(s1), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].len; j++) { - if (((s1 *)wdata[i].p)[j].i != ((s1 *)rdata[i].p)[j].i) { - 
TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].i=%d, rdata[%d].p[%d].i=%d\n", - (int)i, (int)j, (int)((s1 *)wdata[i].p)[j].i, (int)i, (int)j, - (int)((s1 *)rdata[i].p)[j].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(((s1 *)wdata[i].p)[j].f, ((s1 *)rdata[i].p)[j].f)) { - TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].f=%f, rdata[%d].p[%d].f=%f\n", - (int)i, (int)j, (double)((s1 *)wdata[i].p)[j].f, (int)i, (int)j, - (double)((s1 *)rdata[i].p)[j].f); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid1, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end rewrite_vltypes_vlen_compound() */ - -/**************************************************************** -** -** test_vltypes_compound_vlen_vlen(): Test basic VL datatype code. -** Tests compound datatypes with VL datatypes of VL datatypes. -** -****************************************************************/ -static void -test_vltypes_compound_vlen_vlen(void) -{ - typedef struct { /* Struct that the compound type are composed of */ - int i; - float f; - hvl_t v; - } s1; - s1 *wdata; /* data to write */ - s1 *rdata; /* data to read */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1, tid2, tid3; /* Datatype IDs */ - hsize_t dims1[] = {SPACE3_DIM1}; - unsigned i, j, k; /* counting variables */ - hvl_t *t1, *t2; /* Temporary pointer to VL information */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n")); - - /* Allocate and initialize VL data to write */ - wdata = (s1 *)malloc(sizeof(s1) * SPACE3_DIM1); - CHECK_PTR(wdata, "malloc"); - rdata = (s1 *)malloc(sizeof(s1) * SPACE3_DIM1); - CHECK_PTR(rdata, "malloc"); - for (i = 0; i < SPACE3_DIM1; i++) { - wdata[i].i = (int)(i * 10); - wdata[i].f = (float)(i * 20) / 3.0F; - wdata[i].v.p = malloc((i + L1_INCM) * sizeof(hvl_t)); - wdata[i].v.len = i + L1_INCM; - for (t1 = (hvl_t *)((wdata[i].v).p), j = 0; j < (i + L1_INCM); j++, t1++) { - t1->p = malloc((j + L2_INCM) * sizeof(unsigned int)); - t1->len = j + L2_INCM; - for (k = 0; k < j + L2_INCM; k++) - ((unsigned int *)t1->p)[k] = i * 100 + j * 10 + k; - } /* end for */ - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE3_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a VL datatype to refer to */ - tid3 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid3, FAIL, "H5Tvlen_create"); - - /* Create a VL datatype to refer to */ - tid1 = 
H5Tvlen_create(tid3); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create the base compound type */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE3_DIM1; i++) { - if (wdata[i].i != rdata[i].i) { - TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, - (int)wdata[i].i, (int)i, (int)rdata[i].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { - TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, - (double)wdata[i].f, (int)i, (double)rdata[i].f); - continue; - } /* end if */ - - if (wdata[i].v.len != rdata[i].v.len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", - __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); - continue; - } /* end if */ - - for (t1 = (hvl_t *)(wdata[i].v.p), t2 = (hvl_t *)(rdata[i].v.p), j = 0; j < rdata[i].v.len; - j++, t1++, t2++) { - if (t1->len != t2->len) { - TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", - __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); - continue; - } /* end if */ - for (k = 0; k < t2->len; k++) { - if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { - TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, - (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid3); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Release buffers */ - free(wdata); - free(rdata); -} /* end test_vltypes_compound_vlen_vlen() */ - 
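The tests above (and the rewrite_* variants that follow) lean on H5Pset_vlen_mem_manager() to route the library's variable-length allocations through custom callbacks so that leaked bytes show up in `mem_used`. Below is a minimal sketch of what such callbacks look like, assuming only the public HDF5 header; the suite's real test_vltypes_alloc_custom()/test_vltypes_free_custom() additionally stash each block's size in a small header so the free callback can subtract it, which this sketch omits.

#include <stdlib.h>
#include "hdf5.h"

/* Sketch only -- not part of this patch.  Counts the bytes the library
 * requests for VL data; the real test callbacks also record each block's
 * size so it can be subtracted again when the block is freed. */
static void *
sketch_vlen_alloc(size_t size, void *info)
{
    size_t *bytes_used = (size_t *)info;

    *bytes_used += size;
    return malloc(size);
}

static void
sketch_vlen_free(void *mem, void *info)
{
    (void)info; /* a stored size header would be needed to decrement the count */
    free(mem);
}

/* Installed on a dataset transfer property list before reading VL data:
 *
 *     hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
 *     H5Pset_vlen_mem_manager(dxpl, sketch_vlen_alloc, &mem_used,
 *                             sketch_vlen_free, &mem_used);
 */

With H5P_DEFAULT instead of such a property list the library uses its own allocator, which is why these tests compare `mem_used` against the size reported by H5Dvlen_get_buf_size() after H5Dread() and expect it to drop back to zero after H5Treclaim().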
-/**************************************************************** -** -** test_vltypes_compound_vlstr(): Test VL datatype code. -** Tests VL datatypes of compound datatypes with VL string. -** Dataset is extensible chunked, and data is rewritten with -** shorter VL data. -** -****************************************************************/ -static void -test_vltypes_compound_vlstr(void) -{ - typedef enum { red, blue, green } e1; - typedef struct { - char *string; - e1 color; - } s2; - typedef struct { /* Struct that the compound type are composed of */ - hvl_t v; - } s1; - s1 wdata[SPACE1_DIM1]; /* data to write */ - s1 wdata2[SPACE1_DIM1]; /* data to write */ - s1 rdata[SPACE1_DIM1]; /* data to read */ - s1 rdata2[SPACE1_DIM1]; /* data to read */ - char str[64] = "a\0"; - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset, dset2; /* Dataset ID */ - hid_t sid1, sid2, filespace, filespace2; /* Dataspace ID */ - hid_t tid1, tid2, tid3, tid4, tid5; /* Datatype IDs */ - hid_t cparms; - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t chunk_dims[] = {SPACE1_DIM1 / 2}; - hsize_t maxdims[] = {H5S_UNLIMITED}; - hsize_t size[] = {SPACE1_DIM1}; - hsize_t offset[] = {0}; - unsigned i, j; /* counting variables */ - s2 *t1, *t2; /* Temporary pointer to VL information */ - int val; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing VL Datatype of Compound Datatype with VL String Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].v.p = (s2 *)malloc((i + L3_INCM) * sizeof(s2)); - wdata[i].v.len = i + L3_INCM; - for (t1 = (s2 *)((wdata[i].v).p), j = 0; j < (i + L3_INCM); j++, t1++) { - strcat(str, "m"); - t1->string = (char *)malloc(strlen(str) * sizeof(char) + 1); - strcpy(t1->string, str); - /*t1->color = red;*/ - t1->color = blue; - } - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, maxdims); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a VL string type*/ - tid4 = H5Tcopy(H5T_C_S1); - CHECK(tid4, FAIL, "H5Tcopy"); - ret = H5Tset_size(tid4, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create an enum type */ - tid3 = H5Tenum_create(H5T_STD_I32LE); - val = 0; - ret = H5Tenum_insert(tid3, "RED", &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - val = 1; - ret = H5Tenum_insert(tid3, "BLUE", &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - val = 2; - ret = H5Tenum_insert(tid3, "GREEN", &val); - CHECK(ret, FAIL, "H5Tenum_insert"); - - /* Create the first layer compound type */ - tid5 = H5Tcreate(H5T_COMPOUND, sizeof(s2)); - CHECK(tid5, FAIL, "H5Tcreate"); - /* Insert fields */ - ret = H5Tinsert(tid5, "string", HOFFSET(s2, string), tid4); - CHECK(ret, FAIL, "H5Tinsert"); - /* Insert fields */ - ret = H5Tinsert(tid5, "enumerate", HOFFSET(s2, color), tid3); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create a VL datatype of first layer compound type */ - tid1 = H5Tvlen_create(tid5); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create the base compound type */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Modify dataset creation properties, i.e. 
enable chunking */ - cparms = H5Pcreate(H5P_DATASET_CREATE); - ret = H5Pset_chunk(cparms, SPACE1_RANK, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, cparms, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Extend the dataset. This call assures that dataset is 4.*/ - ret = H5Dset_extent(dataset, size); - CHECK(ret, FAIL, "H5Dset_extent"); - - /* Select a hyperslab */ - filespace = H5Dget_space(dataset); - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, dims1, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, sid1, filespace, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL); - CHECK(ret, FAIL, "H5Fflush"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close disk dataspace */ - ret = H5Sclose(filespace); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid4); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid5); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid3); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close Property list */ - ret = H5Pclose(cparms); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dset2 = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dset2, FAIL, "H5Dopen2"); - - /* Get the data type */ - tid2 = H5Dget_type(dset2); - CHECK(tid2, FAIL, "H5Dget_type"); - - /* Read dataset from disk */ - ret = H5Dread(dset2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].v.len != rdata[i].v.len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", - __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); - continue; - } /* end if */ - - for (t1 = (s2 *)(wdata[i].v.p), t2 = (s2 *)(rdata[i].v.p), j = 0; j < rdata[i].v.len; - j++, t1++, t2++) { - if (strcmp(t1->string, t2->string) != 0) { - TestErrPrintf("VL data values don't match!, t1->string=%s, t2->string=%s\n", t1->string, - t2->string); - continue; - } /* end if */ - if (t1->color != t2->color) { - TestErrPrintf("VL data values don't match!, t1->color=%d, t2->color=%d\n", t1->color, - t2->color); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Use this part for new data */ - strcpy(str, "bbbbbbbb\0"); - for (i = 0; i < SPACE1_DIM1; i++) { - wdata2[i].v.p = (s2 *)malloc((i + 1) * sizeof(s2)); - wdata2[i].v.len = i + 1; - for (t1 = (s2 *)(wdata2[i].v).p, j = 0; j < i + 1; j++, t1++) { - strcat(str, "pp"); - t1->string = (char *)malloc(strlen(str) * sizeof(char) + 1); - strcpy(t1->string, str); - t1->color = green; - } - } /* end for */ - - /* Select a hyperslab */ - filespace2 = H5Dget_space(dset2); - ret = 
H5Sselect_hyperslab(filespace2, H5S_SELECT_SET, offset, NULL, dims1, NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create dataspace for datasets */ - sid2 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Write dataset to disk */ - ret = H5Dwrite(dset2, tid2, sid2, filespace2, H5P_DEFAULT, &wdata2); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read dataset from disk */ - ret = H5Dread(dset2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata2); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata2[i].v.len != rdata2[i].v.len) { - TestErrPrintf("%d: VL data length don't match!, wdata2[%d].v.len=%d, rdata2[%d].v.len=%d\n", - __LINE__, (int)i, (int)wdata2[i].v.len, (int)i, (int)rdata2[i].v.len); - continue; - } /* end if */ - - for (t1 = (s2 *)(wdata2[i].v.p), t2 = (s2 *)(rdata2[i].v.p), j = 0; j < rdata2[i].v.len; - j++, t1++, t2++) { - if (strcmp(t1->string, t2->string) != 0) { - TestErrPrintf("VL data values don't match!, t1->string=%s, t2->string=%s\n", t1->string, - t2->string); - continue; - } /* end if */ - if (t1->color != t2->color) { - TestErrPrintf("VL data values don't match!, t1->color=%d, t2->color=%d\n", t1->color, - t2->color); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata2); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Reclaim the VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata2); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Dclose(dset2); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close disk dataspace */ - ret = H5Sclose(filespace2); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); -} /* end test_vltypes_compound_vlstr() */ - -/**************************************************************** -** -** test_vltypes_compound_vlen_atomic(): Test basic VL datatype code. -** Tests compound datatypes with VL datatypes of atomic datatypes. 
-** -****************************************************************/ -static void -test_vltypes_compound_vlen_atomic(void) -{ - typedef struct { /* Struct that the VL sequences are composed of */ - int i; - float f; - hvl_t v; - } s1; - s1 wdata[SPACE1_DIM1]; /* Information to write */ - s1 rdata[SPACE1_DIM1]; /* Information read in */ - s1 fill; /* Fill value */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1, tid2; /* Datatype IDs */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hid_t dcpl_pid; /* Dataset creation property list ID */ - hsize_t dims1[] = {SPACE1_DIM1}; - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].i = (int)(i * 10); - wdata[i].f = (float)(i * 20) / 3.0F; - wdata[i].v.p = malloc((i + 1) * sizeof(unsigned int)); - wdata[i].v.len = i + 1; - for (j = 0; j < (i + 1); j++) - ((unsigned int *)wdata[i].v.p)[j] = i * 10 + j; - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a VL datatype to refer to */ - tid1 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create the base compound type */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < 
SPACE1_DIM1; i++) { - if (wdata[i].i != rdata[i].i) { - TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, - (int)wdata[i].i, (int)i, (int)rdata[i].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { - TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, - (double)wdata[i].f, (int)i, (double)rdata[i].f); - continue; - } /* end if */ - if (wdata[i].v.len != rdata[i].v.len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", - __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].v.len; j++) { - if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) { - TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n", - (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j, - (int)((unsigned int *)rdata[i].v.p)[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a second dataset, with a fill value */ - dcpl_pid = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl_pid, FAIL, "H5Pcreate"); - - /* Set the fill value for the second dataset */ - memset(&fill, 0, sizeof(s1)); - ret = H5Pset_fill_value(dcpl_pid, tid2, &fill); - CHECK(ret, FAIL, "H5Pset_fill_value"); - - dataset = H5Dcreate2(fid1, "Dataset2", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Close dataset creation property list */ - ret = H5Pclose(dcpl_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Read from dataset before writing data */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Check data read in */ - for (i = 0; i < SPACE1_DIM1; i++) - if (rdata[i].i != 0 || !H5_FLT_ABS_EQUAL(rdata[i].f, 0.0F) || rdata[i].v.len != 0 || - rdata[i].v.p != NULL) - TestErrPrintf( - "VL doesn't match!, rdata[%d].i=%d, rdata[%d].f=%f, rdata[%d].v.len=%u, rdata[%d].v.p=%p\n", - (int)i, rdata[i].i, (int)i, (double)rdata[i].f, (int)i, (unsigned)rdata[i].v.len, (int)i, - rdata[i].v.p); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].i != rdata[i].i) { - TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, - (int)wdata[i].i, (int)i, (int)rdata[i].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { - TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, - (double)wdata[i].f, (int)i, (double)rdata[i].f); - continue; - } /* end if */ - if (wdata[i].v.len != rdata[i].v.len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", - __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].v.len; j++) { - if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int 
*)rdata[i].v.p)[j]) { - TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n", - (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j, - (int)((unsigned int *)rdata[i].v.p)[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_vltypes_compound_vlen_atomic() */ - -/**************************************************************** -** -** rewrite_vltypes_compound_vlen_atomic(): Check memory leak for -** basic VL datatype code. -** Check memory leak for compound datatypes with VL datatypes -** of atomic datatypes. -** -****************************************************************/ -static void -rewrite_vltypes_compound_vlen_atomic(void) -{ - typedef struct { /* Struct that the VL sequences are composed of */ - int i; - float f; - hvl_t v; - } s1; - s1 wdata[SPACE1_DIM1]; /* Information to write */ - s1 rdata[SPACE1_DIM1]; /* Information read in */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid1, tid2; /* Datatype IDs */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - unsigned increment = 4; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, - ("Checking memory leak for compound datatype with VL Atomic Datatype Component Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].i = (int)(i * 40); - wdata[i].f = (float)(i * 50) / 3.0F; - wdata[i].v.p = malloc((i + increment) * sizeof(unsigned int)); - wdata[i].v.len = i + increment; - for (j = 0; j < (i + increment); j++) - ((unsigned int *)wdata[i].v.p)[j] = i * 60 + j; - } /* end for */ - - /* Create file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Create a VL datatype to refer to */ - tid1 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create the base compound type */ - tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1)); - CHECK(tid2, FAIL, "H5Tcreate"); - - /* Insert fields */ - ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT); - CHECK(ret, FAIL, "H5Tinsert"); - ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Create a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Create dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Write dataset 
to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory will be used */ - ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 22 elements allocated = 4+5+6+7 elements for each array position */ - VERIFY(size, 22 * sizeof(unsigned int), "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 22 elements allocated = 4+5+6+7 elements for each array position */ - VERIFY(mem_used, 22 * sizeof(unsigned int), "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].i != rdata[i].i) { - TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i, - (int)wdata[i].i, (int)i, (int)rdata[i].i); - continue; - } /* end if */ - if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) { - TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i, - (double)wdata[i].f, (int)i, (double)rdata[i].f); - continue; - } /* end if */ - if (wdata[i].v.len != rdata[i].v.len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n", - __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len); - continue; - } /* end if */ - for (j = 0; j < rdata[i].v.len; j++) { - if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) { - TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n", - (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j, - (int)((unsigned int *)rdata[i].v.p)[j]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Reclaim the VL data */ - ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end rewrite_vltypes_compound_vlen_atomic() */ - -/**************************************************************** -** -** vlen_size_func(): Test basic VL datatype code. -** Tests VL datatype with VL datatypes of atomic datatypes. 
-**
-****************************************************************/
-static size_t
-vlen_size_func(unsigned long n)
-{
-    size_t u      = 1;
-    size_t tmp    = 1;
-    size_t result = 1;
-
-    while (u < n) {
-        u++;
-        tmp += u;
-        result += tmp;
-    }
-    return (result);
-}
-
-/****************************************************************
-**
-** test_vltypes_vlen_vlen_atomic(): Test basic VL datatype code.
-**      Tests VL datatype with VL datatypes of atomic datatypes.
-**
-****************************************************************/
-static void
-test_vltypes_vlen_vlen_atomic(void)
-{
-    hvl_t    wdata[SPACE1_DIM1]; /* Information to write */
-    hvl_t    rdata[SPACE1_DIM1]; /* Information read in */
-    hvl_t   *t1, *t2;            /* Temporary pointer to VL information */
-    hid_t    fid1;               /* HDF5 File IDs */
-    hid_t    dataset;            /* Dataset ID */
-    hid_t    sid1;               /* Dataspace ID */
-    hid_t    tid1, tid2;         /* Datatype IDs */
-    hid_t    xfer_pid;           /* Dataset transfer property list ID */
-    hsize_t  dims1[] = {SPACE1_DIM1};
-    hsize_t  size;               /* Number of bytes which will be used */
-    unsigned i, j, k;            /* counting variables */
-    size_t   mem_used = 0;       /* Memory used during allocation */
-    herr_t   ret;                /* Generic return value */
-
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing VL Datatypes with VL Atomic Datatype Component Functionality\n"));
-
-    /* Allocate and initialize VL data to write */
-    for (i = 0; i < SPACE1_DIM1; i++) {
-        wdata[i].p = malloc((i + 1) * sizeof(hvl_t));
-        if (wdata[i].p == NULL) {
-            TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i);
-            return;
-        } /* end if */
-        wdata[i].len = i + 1;
-        for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + 1); j++, t1++) {
-            t1->p = malloc((j + 1) * sizeof(unsigned int));
-            if (t1->p == NULL) {
-                TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); - return; - } /* end if */ - t1->len = j + 1; - for (k = 0; k < (j + 1); k++) - ((unsigned int *)t1->p)[k] = i * 100 + j * 10 + k; - } /* end for */ - } /* end for */ - - /* Create file */ - fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fcreate"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a VL datatype to refer to */ - tid1 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create the base VL type */ - tid2 = H5Tvlen_create(tid1); - CHECK(tid2, FAIL, "H5Tvlen_create"); - - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dcreate2"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Create dataspace for datasets */ - sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); - CHECK(sid1, FAIL, "H5Screate_simple"); - - /* Create a VL datatype to refer to */ - tid1 = H5Tvlen_create(H5T_NATIVE_UINT); - CHECK(tid1, FAIL, "H5Tvlen_create"); - - /* Create the base VL type */ - tid2 = H5Tvlen_create(tid1); - CHECK(tid2, FAIL, "H5Tvlen_create"); - - /* Open a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory was used */ - ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ - VERIFY(size, - (hsize_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + - vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), - "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ - VERIFY(mem_used, - (size_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + - vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), - "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, 
(int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (t1 = (hvl_t *)wdata[i].p, t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; j++, t1++, t2++) { - if (t1->len != t2->len) { - TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", - __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); - continue; - } /* end if */ - for (k = 0; k < t2->len; k++) { - if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { - TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, - (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - } /* end for */ - - /* Reclaim all the (nested) VL data */ - ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close datatype */ - ret = H5Tclose(tid1); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end test_vltypes_vlen_vlen_atomic() */ - -/**************************************************************** -** -** rewrite_longer_vltypes_vlen_vlen_atomic(): Test basic VL datatype code. -** Tests VL datatype with VL datatypes of atomic datatypes. -** -****************************************************************/ -static void -rewrite_longer_vltypes_vlen_vlen_atomic(void) -{ - hvl_t wdata[SPACE1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1]; /* Information read in */ - hvl_t *t1, *t2; /* Temporary pointer to VL information */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid2; /* Datatype IDs */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j, k; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - unsigned increment = 1; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Check memory leak for VL Datatypes with VL Atomic Datatype Component Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].p = malloc((i + increment) * sizeof(hvl_t)); - if (wdata[i].p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); - return; - } /* end if */ - wdata[i].len = i + increment; - for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + increment); j++, t1++) { - t1->p = malloc((j + 1) * sizeof(unsigned int)); - if (t1->p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); - return; - } /* end if */ - t1->len = j + 1; - for (k = 0; k < (j + 1); k++) - ((unsigned int *)t1->p)[k] = i * 1000 + j * 100 + k * 10; - } /* end for */ - } /* end for */ - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Open datatype of the dataset */ - tid2 = H5Dget_type(dataset); - CHECK(tid2, FAIL, "H5Dget_type"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file for data checking */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get datatype for dataset */ - tid2 = H5Dget_type(dataset); - CHECK(tid2, FAIL, "H5Dget_type"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory was used */ - ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 18 hvl_t elements allocated = 3 + 4 + 5 + 6 elements for each array position */ - /* 52 unsigned int elements allocated = 6 + 10 + 15 + 21 elements */ - /*VERIFY(size, 18 * sizeof(hvl_t) + 52 * sizeof(unsigned int), "H5Dvlen_get_buf_size");*/ - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 18 hvl_t elements allocated = 3+4+5+6elements for each array position */ - /* 52 unsigned int elements allocated = 6+10+15+21 elements */ - /*VERIFY(mem_used,18*sizeof(hvl_t)+52*sizeof(unsigned int),"H5Dread");*/ - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (t1 = (hvl_t *)(wdata[i].p), t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; - j++, t1++, t2++) { - if (t1->len != t2->len) { - TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", - __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); - continue; - } /* end if */ - for (k = 0; k < t2->len; k++) { - if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { - TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, - (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int 
*)t2->p)[k]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - } /* end for */ - - /* Reclaim all the (nested) VL data */ - ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end rewrite_longer_vltypes_vlen_vlen_atomic() */ - -/**************************************************************** -** -** rewrite_shorter_vltypes_vlen_vlen_atomic(): Test basic VL datatype code. -** Tests VL datatype with VL datatypes of atomic datatypes. -** -****************************************************************/ -static void -rewrite_shorter_vltypes_vlen_vlen_atomic(void) -{ - hvl_t wdata[SPACE1_DIM1]; /* Information to write */ - hvl_t rdata[SPACE1_DIM1]; /* Information read in */ - hvl_t *t1, *t2; /* Temporary pointer to VL information */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t sid1; /* Dataspace ID */ - hid_t tid2; /* Datatype IDs */ - hid_t xfer_pid; /* Dataset transfer property list ID */ - hsize_t size; /* Number of bytes which will be used */ - unsigned i, j, k; /* counting variables */ - size_t mem_used = 0; /* Memory used during allocation */ - unsigned increment = 1; - herr_t ret; /* Generic return value */ - - /* Output message about test being performed */ - MESSAGE(5, ("Check memory leak for VL Datatypes with VL Atomic Datatype Component Functionality\n")); - - /* Allocate and initialize VL data to write */ - for (i = 0; i < SPACE1_DIM1; i++) { - wdata[i].p = malloc((i + increment) * sizeof(hvl_t)); - if (wdata[i].p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i); - return; - } /* end if */ - wdata[i].len = i + increment; - for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + increment); j++, t1++) { - t1->p = malloc((j + 1) * sizeof(unsigned int)); - if (t1->p == NULL) { - TestErrPrintf("Cannot allocate memory for VL data! 
i=%u, j=%u\n", i, j); - return; - } /* end if */ - t1->len = j + 1; - for (k = 0; k < (j + 1); k++) - ((unsigned int *)t1->p)[k] = i * 100000 + j * 1000 + k * 10; - } /* end for */ - } /* end for */ - - /* Open file */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open the dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Open datatype of the dataset */ - tid2 = H5Dget_type(dataset); - CHECK(tid2, FAIL, "H5Dget_type"); - - /* Write dataset to disk */ - ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file for data checking */ - fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(fid1, FAIL, "H5Fopen"); - - /* Open a dataset */ - dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT); - CHECK(dataset, FAIL, "H5Dopen2"); - - /* Get dataspace for datasets */ - sid1 = H5Dget_space(dataset); - CHECK(sid1, FAIL, "H5Dget_space"); - - /* Get datatype for dataset */ - tid2 = H5Dget_type(dataset); - CHECK(tid2, FAIL, "H5Dget_type"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Make certain the correct amount of memory was used */ - ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size); - CHECK(ret, FAIL, "H5Dvlen_get_buf_size"); - - /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ - VERIFY(size, - (hsize_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + - vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), - "H5Dvlen_get_buf_size"); - - /* Read dataset from disk */ - ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Dread"); - - /* Make certain the correct amount of memory has been used */ - /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */ - /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */ - VERIFY(mem_used, - (size_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) + - vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)), - "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < SPACE1_DIM1; i++) { - if (wdata[i].len != rdata[i].len) { - TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__, - (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len); - continue; - } /* end if */ - for (t1 = (hvl_t *)(wdata[i].p), t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; - j++, t1++, t2++) { - if (t1->len != t2->len) { - TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n", - __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len); - continue; - } /* end if */ - for (k = 0; k < t2->len; k++) { - if 
(((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) { - TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k, - (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]); - continue; - } /* end if */ - } /* end for */ - } /* end for */ - } /* end for */ - - /* Reclaim all the (nested) VL data */ - ret = H5Treclaim(tid2, sid1, xfer_pid, rdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Make certain the VL memory has been freed */ - VERIFY(mem_used, 0, "H5Treclaim"); - - /* Reclaim the write VL data */ - ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close datatype */ - ret = H5Tclose(tid2); - CHECK(ret, FAIL, "H5Tclose"); - - /* Close disk dataspace */ - ret = H5Sclose(sid1); - CHECK(ret, FAIL, "H5Sclose"); - - /* Close dataset transfer property list */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - /* Close file */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); - -} /* end rewrite_shorter_vltypes_vlen_vlen_atomic() */ - -/**************************************************************** -** -** test_vltypes_fill_value(): Test fill value for VL data. -** One tests data space isn't allocated; another tests data -** space is allocated. -** -****************************************************************/ -static void -test_vltypes_fill_value(void) -{ - typedef struct dtype1_struct { - unsigned int gui; - unsigned int pgui; - const char *str_id; - const char *str_name; - const char *str_desc; - const char *str_orig; - const char *str_stat; - unsigned int ver; - double val; - double ma; - double mi; - const char *str_form; - const char *str_unit; - } dtype1_struct; - - herr_t ret; - hid_t file_id; - hid_t dtype1_id = -1; - hid_t str_id = -1; - hid_t small_dspace_id; /* Dataspace ID for small datasets */ - hid_t large_dspace_id; /* Dataspace ID for large datasets */ - hid_t small_select_dspace_id; /* Dataspace ID for selection in small datasets */ - hid_t large_select_dspace_id; /* Dataspace ID for selection in large datasets */ - hid_t dset_dspace_id = -1; /* Dataspace ID for a particular dataset */ - hid_t dset_select_dspace_id = -1; /* Dataspace ID for selection in a particular dataset */ - hid_t scalar_dspace_id; /* Dataspace ID for scalar dataspace */ - hid_t single_dspace_id; /* Dataspace ID for single element selection */ - hsize_t single_offset[] = {2}; /* Offset of single element selection */ - hsize_t single_block[] = {1}; /* Block size of single element selection */ - hsize_t select_offset[] = {0}; /* Offset of non-contiguous element selection */ - hsize_t select_stride[] = {2}; /* Stride size of non-contiguous element selection */ - hsize_t small_select_count[] = {SPACE4_DIM_SMALL / - 2}; /* Count of small non-contiguous element selection */ - hsize_t large_select_count[] = {SPACE4_DIM_LARGE / - 2}; /* Count of large non-contiguous element selection */ - hsize_t select_block[] = {1}; /* Block size of non-contiguous element selection */ - hid_t dcpl_id, xfer_pid; - hid_t dset_id; - hsize_t small_dims[] = {SPACE4_DIM_SMALL}; - hsize_t large_dims[] = {SPACE4_DIM_LARGE}; - size_t dset_elmts = 0; /* Number of elements in a particular dataset */ - const dtype1_struct fill1 = {1, 2, "foobar", "", NULL, "\0", "dead", - 3, 4.0, 100.0, 1.0, "liquid", "meter"}; - const dtype1_struct wdata = {3, 4, "", NULL, "\0", "foo", "two", 6, 8.0, 200.0, 2.0, "solid", "yard"}; - dtype1_struct *rbuf = NULL; 
/* Buffer for reading data */ - size_t mem_used = 0; /* Memory used during allocation */ - H5D_layout_t layout; /* Dataset storage layout */ - char dset_name1[64], dset_name2[64]; /* Dataset names */ - unsigned i; - - /* Output message about test being performed */ - MESSAGE(5, ("Check fill value for VL data\n")); - - /* Create a string datatype */ - str_id = H5Tcopy(H5T_C_S1); - CHECK(str_id, FAIL, "H5Tcopy"); - ret = H5Tset_size(str_id, H5T_VARIABLE); - CHECK(ret, FAIL, "H5Tset_size"); - - /* Create a compound data type */ - dtype1_id = H5Tcreate(H5T_COMPOUND, sizeof(struct dtype1_struct)); - CHECK(dtype1_id, FAIL, "H5Tcreate"); - - ret = H5Tinsert(dtype1_id, "guid", HOFFSET(struct dtype1_struct, gui), H5T_NATIVE_UINT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "pguid", HOFFSET(struct dtype1_struct, pgui), H5T_NATIVE_UINT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_id", HOFFSET(dtype1_struct, str_id), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_name", HOFFSET(dtype1_struct, str_name), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_desc", HOFFSET(dtype1_struct, str_desc), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_orig", HOFFSET(dtype1_struct, str_orig), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_stat", HOFFSET(dtype1_struct, str_stat), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "ver", HOFFSET(struct dtype1_struct, ver), H5T_NATIVE_UINT); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "val", HOFFSET(struct dtype1_struct, val), H5T_NATIVE_DOUBLE); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "ma", HOFFSET(struct dtype1_struct, ma), H5T_NATIVE_DOUBLE); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "mi", HOFFSET(struct dtype1_struct, mi), H5T_NATIVE_DOUBLE); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_form", HOFFSET(dtype1_struct, str_form), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - ret = H5Tinsert(dtype1_id, "str_unit", HOFFSET(dtype1_struct, str_unit), str_id); - CHECK(ret, FAIL, "H5Tinsert"); - - /* Close string datatype */ - ret = H5Tclose(str_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Allocate space for the buffer to read data */ - rbuf = (dtype1_struct *)malloc(SPACE4_DIM_LARGE * sizeof(dtype1_struct)); - CHECK_PTR(rbuf, "malloc"); - - /* Create the small & large dataspaces to use */ - small_dspace_id = H5Screate_simple(SPACE4_RANK, small_dims, NULL); - CHECK(small_dspace_id, FAIL, "H5Screate_simple"); - - large_dspace_id = H5Screate_simple(SPACE4_RANK, large_dims, NULL); - CHECK(large_dspace_id, FAIL, "H5Screate_simple"); - - /* Create small & large dataspaces w/non-contiguous selections */ - small_select_dspace_id = H5Scopy(small_dspace_id); - CHECK(small_select_dspace_id, FAIL, "H5Scopy"); - - ret = H5Sselect_hyperslab(small_select_dspace_id, H5S_SELECT_SET, select_offset, select_stride, - small_select_count, select_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - large_select_dspace_id = H5Scopy(large_dspace_id); - CHECK(large_select_dspace_id, FAIL, "H5Scopy"); - - ret = H5Sselect_hyperslab(large_select_dspace_id, H5S_SELECT_SET, select_offset, select_stride, - large_select_count, select_block); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Create a scalar dataspace */ - scalar_dspace_id = H5Screate(H5S_SCALAR); - CHECK(scalar_dspace_id, FAIL, "H5Screate"); - - /* Create dataset 
create property list and set the fill value */ - dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl_id, FAIL, "H5Pcreate"); - - ret = H5Pset_fill_value(dcpl_id, dtype1_id, &fill1); - CHECK(ret, FAIL, "H5Pset_fill_value"); - - /* Create the file */ - file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - - /* Create datasets with different storage layouts */ - for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { - unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */ - unsigned test_loop; /* Loop over datasets */ - -#ifdef H5_HAVE_FILTER_DEFLATE - if (layout == H5D_CHUNKED) - compress_loop = 2; - else -#endif /* H5_HAVE_FILTER_DEFLATE */ - compress_loop = 1; - - /* Loop over dataset operations */ - for (test_loop = 0; test_loop < compress_loop; test_loop++) { - hid_t tmp_dcpl_id; /* Temporary copy of the dataset creation property list */ - - /* Make a copy of the dataset creation property list */ - tmp_dcpl_id = H5Pcopy(dcpl_id); - CHECK(tmp_dcpl_id, FAIL, "H5Pcopy"); - - /* Layout specific actions */ - switch (layout) { - case H5D_COMPACT: - strcpy(dset_name1, "dataset1-compact"); - strcpy(dset_name2, "dataset2-compact"); - dset_dspace_id = small_dspace_id; - ret = H5Pset_layout(tmp_dcpl_id, H5D_COMPACT); - CHECK(ret, FAIL, "H5Pset_layout"); - break; - - case H5D_CONTIGUOUS: - strcpy(dset_name1, "dataset1-contig"); - strcpy(dset_name2, "dataset2-contig"); - dset_dspace_id = large_dspace_id; - break; - - case H5D_CHUNKED: { - hsize_t chunk_dims[1] = {SPACE4_DIM_LARGE / 4}; - - dset_dspace_id = large_dspace_id; - ret = H5Pset_chunk(tmp_dcpl_id, 1, chunk_dims); - CHECK(ret, FAIL, "H5Pset_chunk"); -#ifdef H5_HAVE_FILTER_DEFLATE - if (test_loop == 1) { - strcpy(dset_name1, "dataset1-chunked-compressed"); - strcpy(dset_name2, "dataset2-chunked-compressed"); - ret = H5Pset_deflate(tmp_dcpl_id, 3); - CHECK(ret, FAIL, "H5Pset_deflate"); - } /* end if */ - else { -#endif /* H5_HAVE_FILTER_DEFLATE */ - strcpy(dset_name1, "dataset1-chunked"); - strcpy(dset_name2, "dataset2-chunked"); -#ifdef H5_HAVE_FILTER_DEFLATE - } /* end else */ -#endif /* H5_HAVE_FILTER_DEFLATE */ - } break; - - case H5D_VIRTUAL: - assert(0 && "Invalid layout type!"); - break; - - case H5D_LAYOUT_ERROR: - case H5D_NLAYOUTS: - default: - assert(0 && "Unknown layout type!"); - break; - } /* end switch */ - - /* Create first data set with default setting - no space is allocated */ - dset_id = H5Dcreate2(file_id, dset_name1, dtype1_id, dset_dspace_id, H5P_DEFAULT, tmp_dcpl_id, - H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Create a second data set with space allocated and fill value written */ - ret = H5Pset_fill_time(tmp_dcpl_id, H5D_FILL_TIME_IFSET); - CHECK(ret, FAIL, "H5Pset_fill_time"); - - ret = H5Pset_alloc_time(tmp_dcpl_id, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); - - dset_id = H5Dcreate2(file_id, dset_name2, dtype1_id, dset_dspace_id, H5P_DEFAULT, tmp_dcpl_id, - H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dcreate2"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close temporary DCPL */ - ret = H5Pclose(tmp_dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - } /* end for */ - } /* end for */ - - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - ret = H5Pclose(dcpl_id); - CHECK(ret, FAIL, "H5Pclose"); - - /* Change to the custom memory allocation routines for reading VL data */ - xfer_pid = 
H5Pcreate(H5P_DATASET_XFER); - CHECK(xfer_pid, FAIL, "H5Pcreate"); - - ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom, - &mem_used); - CHECK(ret, FAIL, "H5Pset_vlen_mem_manager"); - - /* Open the file to check data set value */ - file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - - /* Read empty datasets with different storage layouts */ - for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { - unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */ - unsigned test_loop; /* Loop over datasets */ - -#ifdef H5_HAVE_FILTER_DEFLATE - if (layout == H5D_CHUNKED) - compress_loop = 2; - else -#endif /* H5_HAVE_FILTER_DEFLATE */ - compress_loop = 1; - - /* Loop over dataset operations */ - for (test_loop = 0; test_loop < compress_loop; test_loop++) { - - /* Layout specific actions */ - switch (layout) { - case H5D_COMPACT: - strcpy(dset_name1, "dataset1-compact"); - strcpy(dset_name2, "dataset2-compact"); - dset_dspace_id = small_dspace_id; - dset_select_dspace_id = small_select_dspace_id; - dset_elmts = SPACE4_DIM_SMALL; - break; - - case H5D_CONTIGUOUS: - strcpy(dset_name1, "dataset1-contig"); - strcpy(dset_name2, "dataset2-contig"); - dset_dspace_id = large_dspace_id; - dset_select_dspace_id = large_select_dspace_id; - dset_elmts = SPACE4_DIM_LARGE; - break; - - case H5D_CHUNKED: -#ifdef H5_HAVE_FILTER_DEFLATE - if (test_loop == 1) { - strcpy(dset_name1, "dataset1-chunked-compressed"); - strcpy(dset_name2, "dataset2-chunked-compressed"); - } /* end if */ - else { -#endif /* H5_HAVE_FILTER_DEFLATE */ - strcpy(dset_name1, "dataset1-chunked"); - strcpy(dset_name2, "dataset2-chunked"); -#ifdef H5_HAVE_FILTER_DEFLATE - } /* end else */ -#endif /* H5_HAVE_FILTER_DEFLATE */ - dset_dspace_id = large_dspace_id; - dset_select_dspace_id = large_select_dspace_id; - dset_elmts = SPACE4_DIM_LARGE; - break; - - case H5D_VIRTUAL: - assert(0 && "Invalid layout type!"); - break; - - case H5D_LAYOUT_ERROR: - case H5D_NLAYOUTS: - default: - assert(0 && "Unknown layout type!"); - break; - } /* end switch */ - - /* Open first data set */ - dset_id = H5Dopen2(file_id, dset_name1, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Read in the entire 'empty' dataset of fill value */ - ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Clear the read buffer */ - memset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); - - /* Read in non-contiguous selection from 'empty' dataset of fill value */ - ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if ((i % 2) == select_offset[0]) { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - 
rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || - rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end else */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open the second data set to check the value of data */ - dset_id = H5Dopen2(file_id, dset_name2, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Read in the entire 'empty' dataset of fill value */ - ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Clear the read buffer */ - memset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); - - /* Read in non-contiguous selection from 'empty' dataset of fill value */ - ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if ((i % 2) == select_offset[0]) { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || - rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end else */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ - } /* end for */ - - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Open the file to check data set value */ - file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - - /* Write one element & fill values to datasets with different storage layouts */ - for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { - unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */ - unsigned test_loop; /* Loop over datasets 
*/ - -#ifdef H5_HAVE_FILTER_DEFLATE - if (layout == H5D_CHUNKED) - compress_loop = 2; - else -#endif /* H5_HAVE_FILTER_DEFLATE */ - compress_loop = 1; - - /* Loop over dataset operations */ - for (test_loop = 0; test_loop < compress_loop; test_loop++) { - - /* Layout specific actions */ - switch (layout) { - case H5D_COMPACT: - strcpy(dset_name1, "dataset1-compact"); - strcpy(dset_name2, "dataset2-compact"); - dset_dspace_id = small_dspace_id; - dset_select_dspace_id = small_select_dspace_id; - dset_elmts = SPACE4_DIM_SMALL; - break; - - case H5D_CONTIGUOUS: - strcpy(dset_name1, "dataset1-contig"); - strcpy(dset_name2, "dataset2-contig"); - dset_dspace_id = large_dspace_id; - dset_select_dspace_id = large_select_dspace_id; - dset_elmts = SPACE4_DIM_LARGE; - break; - - case H5D_CHUNKED: -#ifdef H5_HAVE_FILTER_DEFLATE - if (test_loop == 1) { - strcpy(dset_name1, "dataset1-chunked-compressed"); - strcpy(dset_name2, "dataset2-chunked-compressed"); - } /* end if */ - else { -#endif /* H5_HAVE_FILTER_DEFLATE */ - strcpy(dset_name1, "dataset1-chunked"); - strcpy(dset_name2, "dataset2-chunked"); -#ifdef H5_HAVE_FILTER_DEFLATE - } /* end else */ -#endif /* H5_HAVE_FILTER_DEFLATE */ - dset_dspace_id = large_dspace_id; - dset_select_dspace_id = large_select_dspace_id; - dset_elmts = SPACE4_DIM_LARGE; - break; - - case H5D_VIRTUAL: - assert(0 && "Invalid layout type!"); - break; - - case H5D_LAYOUT_ERROR: - case H5D_NLAYOUTS: - default: - assert(0 && "Unknown layout type!"); - break; - } /* end switch */ - - /* Copy the dataset's dataspace */ - single_dspace_id = H5Scopy(dset_dspace_id); - CHECK(single_dspace_id, FAIL, "H5Scopy"); - - /* Set a single element in the dataspace */ - ret = H5Sselect_hyperslab(single_dspace_id, H5S_SELECT_SET, single_offset, NULL, single_block, - NULL); - CHECK(ret, FAIL, "H5Sselect_hyperslab"); - - /* Open first data set */ - dset_id = H5Dopen2(file_id, dset_name1, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Write one element in the dataset */ - ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if (i == single_offset[0]) { - if (strcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || - strcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || - strcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || - strcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || - strcmp(rbuf[i].str_form, wdata.str_form) != 0 || - strcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Clear the read buffer */ - memset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); - - /* Read in non-contiguous selection from dataset */ - ret = H5Dread(dset_id, dtype1_id, 
dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if (i == single_offset[0]) { - if (strcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || - strcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || - strcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || - strcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || - strcmp(rbuf[i].str_form, wdata.str_form) != 0 || - strcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if ((i % 2) == select_offset[0]) { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || - strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || - rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end else */ - } /* end else */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Open the second data set to check the value of data */ - dset_id = H5Dopen2(file_id, dset_name2, H5P_DEFAULT); - CHECK(dset_id, FAIL, "H5Dopen2"); - - /* Write one element in the dataset */ - ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata); - CHECK(ret, FAIL, "H5Dwrite"); - - ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if (i == single_offset[0]) { - if (strcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name || - strcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || - strcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || - strcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || - strcmp(rbuf[i].str_form, wdata.str_form) != 0 || - strcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - /* Clear the read buffer */ - memset(rbuf, 0, dset_elmts * sizeof(dtype1_struct)); - - /* Read in non-contiguous selection from dataset */ - ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Dread"); - - /* Compare data read in */ - for (i = 0; i < dset_elmts; i++) { - if (i == single_offset[0]) { - if (strcmp(rbuf[i].str_id, 
wdata.str_id) != 0 || rbuf[i].str_name || - strcmp(rbuf[i].str_desc, wdata.str_desc) != 0 || - strcmp(rbuf[i].str_orig, wdata.str_orig) != 0 || - strcmp(rbuf[i].str_stat, wdata.str_stat) != 0 || - strcmp(rbuf[i].str_form, wdata.str_form) != 0 || - strcmp(rbuf[i].str_unit, wdata.str_unit) != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if ((i % 2) == select_offset[0]) { - if (strcmp(rbuf[i].str_id, "foobar") != 0 || strcmp(rbuf[i].str_name, "") != 0 || - rbuf[i].str_desc || strcmp(rbuf[i].str_orig, "\0") != 0 || - strcmp(rbuf[i].str_stat, "dead") != 0 || - strcmp(rbuf[i].str_form, "liquid") != 0 || - strcmp(rbuf[i].str_unit, "meter") != 0) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end if */ - else { - if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig || - rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) { - TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i); - continue; - } /* end if */ - } /* end else */ - } /* end else */ - } /* end for */ - - /* Release the space */ - ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf); - CHECK(ret, FAIL, "H5Treclaim"); - - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - - /* Close the dataspace for the writes */ - ret = H5Sclose(single_dspace_id); - CHECK(ret, FAIL, "H5Sclose"); - } /* end for */ - } /* end for */ - - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); - - /* Clean up rest of IDs */ - ret = H5Pclose(xfer_pid); - CHECK(ret, FAIL, "H5Pclose"); - - ret = H5Sclose(small_dspace_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(large_dspace_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(small_select_dspace_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(large_select_dspace_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Sclose(scalar_dspace_id); - CHECK(ret, FAIL, "H5Sclose"); - - ret = H5Tclose(dtype1_id); - CHECK(ret, FAIL, "H5Tclose"); - - /* Release buffer */ - free(rbuf); -} /* end test_vltypes_fill_value() */ - -/**************************************************************** -** -** test_vltypes(): Main VL datatype testing routine. 
-**
-****************************************************************/
-void
-test_vltypes(void)
-{
-    /* Output message about test being performed */
-    MESSAGE(5, ("Testing Variable-Length Datatypes\n"));
-
-    /* These next tests use the same file */
-    test_vltypes_dataset_create();       /* Check dataset of VL when fill value
-                                          * won't be rewritten to it.*/
-    test_vltypes_funcs();                /* Test functions with VL types */
-    test_vltypes_vlen_atomic();          /* Test VL atomic datatypes */
-    rewrite_vltypes_vlen_atomic();       /* Check VL memory leak */
-    test_vltypes_vlen_compound();        /* Test VL compound datatypes */
-    rewrite_vltypes_vlen_compound();     /* Check VL memory leak */
-    test_vltypes_compound_vlen_atomic(); /* Test compound datatypes with VL atomic components */
-    rewrite_vltypes_compound_vlen_atomic(); /* Check VL memory leak */
-    test_vltypes_vlen_vlen_atomic();     /* Test VL datatype with VL atomic components */
-    rewrite_longer_vltypes_vlen_vlen_atomic();  /*overwrite with VL data of longer sequence*/
-    rewrite_shorter_vltypes_vlen_vlen_atomic(); /*overwrite with VL data of shorted sequence*/
-    test_vltypes_compound_vlen_vlen();   /* Test compound datatypes with VL atomic components */
-    test_vltypes_compound_vlstr();       /* Test data rewritten of nested VL data */
-    test_vltypes_fill_value();           /* Test fill value for VL data */
-} /* test_vltypes() */
-
-/*-------------------------------------------------------------------------
- * Function: cleanup_vltypes
- *
- * Purpose: Cleanup temporary test files
- *
- * Return: none
- *-------------------------------------------------------------------------
- */
-void
-cleanup_vltypes(void)
-{
-    H5Fdelete(FILENAME, H5P_DEFAULT);
-}
diff --git a/test/h5test.c b/test/h5test.c
index ef580cf3072..8efebd7eb08 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -2067,6 +2067,86 @@ h5_check_if_file_locking_enabled(bool *is_enabled)
     return FAIL;
 } /* end h5_check_if_file_locking_enabled() */
 
+/*-------------------------------------------------------------------------
+ * Function: h5_using_native_vol
+ *
+ * Purpose: Checks if the VOL connector being used is (or the VOL
+ *          connector stack being used resolves to) the native VOL
+ *          connector. Either or both of fapl_id and obj_id may be
+ *          provided, but checking of obj_id takes precedence.
+ *          H5I_INVALID_HID should be specified for the parameter that
+ *          is not provided.
+ *
+ *          obj_id must be the ID of an HDF5 object that is accessed
+ *          with the VOL connector to check. If obj_id is provided, the
+ *          entire VOL connector stack is checked to see if it resolves
+ *          to the native VOL connector. If only fapl_id is provided,
+ *          only the top-most VOL connector set on fapl_id is checked
+ *          against the native VOL connector.
+ *
+ *          The HDF5_VOL_CONNECTOR environment variable is not checked
+ *          here, as that only overrides the setting for the default
+ *          File Access Property List, which may not be the File Access
+ *          Property List used for accessing obj_id. There is also
+ *          complexity in determining whether the connector stack
+ *          resolves to the native VOL connector when the only
+ *          information available is a string.
+ *
+ * Return:  Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+h5_using_native_vol(hid_t fapl_id, hid_t obj_id, bool *is_native_vol)
+{
+    hbool_t is_native = false;
+    hid_t   native_id = H5I_INVALID_HID;
+    hid_t   vol_id    = H5I_INVALID_HID;
+    herr_t  ret_value = SUCCEED;
+
+    assert((fapl_id >= 0) || (obj_id >= 0));
+    assert(is_native_vol);
+
+    if (fapl_id == H5P_DEFAULT)
+        fapl_id = H5P_FILE_ACCESS_DEFAULT;
+
+    if (obj_id >= 0) {
+        if (H5VLobject_is_native(obj_id, &is_native) < 0) {
+            ret_value = FAIL;
+            goto done;
+        }
+    }
+    else {
+        if (true != H5VLis_connector_registered_by_value(H5VL_NATIVE_VALUE)) {
+            ret_value = FAIL;
+            goto done;
+        }
+
+        if ((native_id = H5VLget_connector_id_by_value(H5VL_NATIVE_VALUE)) < 0) {
+            ret_value = FAIL;
+            goto done;
+        }
+
+        if (H5Pget_vol_id(fapl_id, &vol_id) < 0) {
+            ret_value = FAIL;
+            goto done;
+        }
+
+        if (vol_id == native_id)
+            is_native = true;
+    }
+
+    *is_native_vol = is_native;
+
+done:
+    if (vol_id != H5I_INVALID_HID)
+        H5VLclose(vol_id);
+    if (native_id != H5I_INVALID_HID)
+        H5VLclose(native_id);
+
+    return ret_value;
+}
+
 /*-------------------------------------------------------------------------
  * Function: h5_using_default_driver
  *
@@ -2104,7 +2184,7 @@ h5_using_default_driver(const char *drv_name)
  *          which are not currently supported for parallel HDF5, such
  *          as writing of VL or region reference datatypes.
  *
- * Return:  true/false
+ * Return:  Non-negative on success/Negative on failure
  *
  *-------------------------------------------------------------------------
  */
diff --git a/test/h5test.h b/test/h5test.h
index 8bba0777ef9..8115207a674 100644
--- a/test/h5test.h
+++ b/test/h5test.h
@@ -290,6 +290,7 @@ H5TEST_DLL const char *h5_get_version_string(H5F_libver_t libver);
 H5TEST_DLL int h5_compare_file_bytes(char *fname1, char *fname2);
 H5TEST_DLL int h5_duplicate_file_by_bytes(const char *orig, const char *dest);
 H5TEST_DLL herr_t h5_check_if_file_locking_enabled(bool *are_enabled);
+H5TEST_DLL herr_t h5_using_native_vol(hid_t fapl_id, hid_t obj_id, bool *is_native_vol);
 H5TEST_DLL bool h5_using_default_driver(const char *drv_name);
 H5TEST_DLL herr_t h5_using_parallel_driver(hid_t fapl_id, bool *driver_is_parallel);
 H5TEST_DLL herr_t h5_driver_is_default_vfd_compatible(hid_t fapl_id, bool *default_vfd_compatible);
diff --git a/test/tarray.c b/test/tarray.c
index 494b65c559d..09f300dc311 100644
--- a/test/tarray.c
+++ b/test/tarray.c
@@ -1918,6 +1918,7 @@ test_compat(void)
     size_t off;  /* Offset of compound field */
     hid_t  mtid; /* Datatype ID for field */
     int    i;    /* Index variables */
+    bool   vol_is_native;
     bool   driver_is_default_compatible;
     herr_t ret; /* Generic return value */
 
@@ -1934,17 +1935,26 @@ test_compat(void)
     * the tarrold.h5 file.
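     *
     * A minimal sketch of the native-VOL skip pattern this patch adds to the
     * tests (the names `file_id` and `fapl_id` are illustrative, not part of
     * this diff; h5_using_native_vol() is the helper added to h5test.c above):
     *
     *     bool vol_is_native = false;
     *
     *     if (h5_using_native_vol(fapl_id, file_id, &vol_is_native) < 0)
     *         TestErrPrintf("can't determine if the native VOL connector is in use\n");
     *     if (!vol_is_native) {
     *         MESSAGE(5, ("    -- SKIPPED --\n"));
     *         return;
     *     }
     *
     * Passing the object ID lets the helper resolve the entire connector stack;
     * with obj_id set to H5I_INVALID_HID, only the top-most connector on the
     * FAPL is compared against the native VOL connector.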
*/ - if (h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible) < 0) - TestErrPrintf("can't check if VFD is default VFD compatible\n"); - if (!driver_is_default_compatible) { - printf(" -- SKIPPED --\n"); - return; - } - /* Open the testfile */ fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK_I(fid1, "H5Fopen"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid1), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Check if VFD used is native file format compatible */ + CHECK(h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible), FAIL, + "h5_driver_is_default_vfd_compatible"); + if (!driver_is_default_compatible) { + CHECK(H5Fclose(fid1), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Only try to proceed if the file is around */ if (fid1 >= 0) { /* Open the first dataset (with no array fields) */ diff --git a/test/tattr.c b/test/tattr.c index 2e391c545d7..d38fdaabc8d 100644 --- a/test/tattr.c +++ b/test/tattr.c @@ -197,16 +197,24 @@ test_attr_basic_write(hid_t fapl) hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2}; int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */ int i; + bool vol_is_native; hid_t ret_id; /* Generic hid_t return value */ herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Basic Scalar Attribute Writing Functions\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid1, FAIL, "H5Fcreate"); + CHECK(h5_using_native_vol(fapl, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataspace for dataset */ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); CHECK(sid1, FAIL, "H5Screate_simple"); @@ -267,9 +275,11 @@ test_attr_basic_write(hid_t fapl) ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1a); CHECK(ret, FAIL, "H5Awrite"); - /* Check storage size for attribute */ - attr_size = H5Aget_storage_size(attr); - VERIFY(attr_size, (ATTR1_DIM1 * sizeof(int)), "H5A_get_storage_size"); + if (vol_is_native) { + /* Check storage size for attribute */ + attr_size = H5Aget_storage_size(attr); + VERIFY(attr_size, (ATTR1_DIM1 * sizeof(int)), "H5A_get_storage_size"); + } /* Read attribute information immediately, without closing attribute */ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1); @@ -388,9 +398,11 @@ test_attr_basic_write(hid_t fapl) attr = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check storage size for attribute */ - attr_size = H5Aget_storage_size(attr); - VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size"); + if (vol_is_native) { + /* Check storage size for attribute */ + attr_size = H5Aget_storage_size(attr); + VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size"); + } /* Try to create the same attribute again (should fail) */ H5E_BEGIN_TRY @@ -404,9 +416,11 @@ test_attr_basic_write(hid_t fapl) ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2); CHECK(ret, FAIL, "H5Awrite"); - /* Check storage size for attribute */ - attr_size = H5Aget_storage_size(attr); - VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5A_get_storage_size"); + if (vol_is_native) { + /* Check storage 
size for attribute */ + attr_size = H5Aget_storage_size(attr); + VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5A_get_storage_size"); + } /* Close attribute */ ret = H5Aclose(attr); @@ -537,6 +551,12 @@ test_attr_flush(hid_t fapl) /* Output message about test being performed */ MESSAGE(5, ("Testing Attribute Flushing\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILL_VALUES) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + fil = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fil, FAIL, "H5Fcreate"); @@ -728,6 +748,11 @@ test_attr_compound_write(hid_t fapl) /* Output message about test being performed */ MESSAGE(5, ("Testing Multiple Attribute Functions\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid1, FAIL, "H5Fcreate"); @@ -970,6 +995,11 @@ test_attr_scalar_write(hid_t fapl) /* Output message about test being performed */ MESSAGE(5, ("Testing Basic Attribute Functions\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid1, FAIL, "H5Fcreate"); @@ -1113,6 +1143,11 @@ test_attr_mult_write(hid_t fapl) /* Output message about test being performed */ MESSAGE(5, ("Testing Multiple Attribute Functions\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid1, FAIL, "H5Fcreate"); @@ -1578,6 +1613,11 @@ test_attr_delete(hid_t fapl) /* Output message about test being performed */ MESSAGE(5, ("Testing Basic Attribute Deletion Functions\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Open file */ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); CHECK(fid1, FAIL, "H5Fopen"); @@ -1694,33 +1734,45 @@ test_attr_delete(hid_t fapl) static void test_attr_dtype_shared(hid_t fapl) { - hid_t file_id; /* File ID */ - hid_t dset_id; /* Dataset ID */ - hid_t space_id; /* Dataspace ID for dataset & attribute */ - hid_t type_id; /* Datatype ID for named datatype */ - hid_t attr_id; /* Attribute ID */ - int data = 8; /* Data to write */ - int rdata = 0; /* Read read in */ - H5O_info2_t oinfo; /* Object's information */ - h5_stat_size_t empty_filesize; /* Size of empty file */ - h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + hid_t file_id; /* File ID */ + hid_t dset_id; /* Dataset ID */ + hid_t space_id; /* Dataspace ID for dataset & attribute */ + hid_t type_id; /* Datatype ID for named datatype */ + hid_t attr_id; /* Attribute ID */ + int data = 8; /* Data to write */ + int rdata = 0; /* Read read in */ + H5O_info2_t oinfo; /* Object's information */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ + h5_stat_size_t filesize; /* Size of file after modifications */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Shared Datatypes with Attributes\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create a file 
*/ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(file_id, FAIL, "H5Fopen"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, file_id, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(file_id); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -1854,9 +1906,11 @@ test_attr_dtype_shared(hid_t fapl) ret = H5Fclose(file_id); CHECK(ret, FAIL, "H5Fclose"); - /* Check size of file */ - filesize = h5_get_file_size(FILENAME, fapl); - VERIFY(filesize, empty_filesize, "h5_get_file_size"); + if (vol_is_native) { + /* Check size of file */ + filesize = h5_get_file_size(FILENAME, fapl); + VERIFY(filesize, empty_filesize, "h5_get_file_size"); + } } /* test_attr_dtype_shared() */ /**************************************************************** @@ -2194,25 +2248,36 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl) unsigned min_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Dense Attribute Storage Creation\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -2244,9 +2309,11 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes, until just before converting to dense storage */ for (u = 0; u < max_compact; u++) { @@ -2264,9 +2331,11 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's 
attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add one more attribute, to push into "dense" storage */ /* Create attribute */ @@ -2274,9 +2343,11 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Write data into the attribute */ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); @@ -2310,7 +2381,7 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -2336,9 +2407,10 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl) unsigned min_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Opening Attributes in Dense Storage\n")); @@ -2347,14 +2419,19 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -2390,9 +2467,11 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes, until just before converting to dense storage */ for (u = 0; u < max_compact; u++) { @@ -2414,9 +2493,11 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "test_attr_dense_verify"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, 
"H5O__is_attr_dense_test"); + } /* Add one more attribute, to push into "dense" storage */ /* Create attribute */ @@ -2424,9 +2505,11 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Write data into the attribute */ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u); @@ -2456,7 +2539,7 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -2482,10 +2565,11 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) unsigned min_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ H5O_info2_t oinfo; /* Object info */ int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); + bool vol_is_native; herr_t ret; /* Generic return value */ /* Only run this test for sec2/default driver */ @@ -2509,14 +2593,19 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) if (use_min_dset_oh) CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -2552,9 +2641,11 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes, until well into dense storage */ for (u = 0; u < (max_compact * 2); u++) { @@ -2577,9 +2668,11 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Close dataspace */ ret = H5Sclose(sid); @@ -2613,18 +2706,22 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, 
"test_attr_dense_verify"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Delete one more attribute, which should cause reversion to compact storage */ snprintf(attrname, sizeof(attrname), "attr %02u", u); ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Verify attributes still left */ ret = test_attr_dense_verify(dataset, (u - 1)); @@ -2635,9 +2732,11 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Verify attributes still left */ ret = test_attr_dense_verify(dataset, (u - 2)); @@ -2655,7 +2754,7 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -2681,13 +2780,14 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl) unsigned max_compact; /* Maximum # of attributes to store compactly */ unsigned min_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? 
*/ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ H5O_info2_t oinfo; /* Object info */ unsigned u; /* Local index variable */ int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); unsigned use_corder; /* Track creation order or not */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Only run this test for sec2/default driver */ if (!h5_using_default_driver(NULL)) @@ -2710,14 +2810,19 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl) if (use_min_dset_oh) CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -2753,9 +2858,11 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl) dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes, until well into dense storage */ for (u = 0; u < (max_compact * 2); u++) { @@ -2785,9 +2892,11 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl) VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Close Dataset */ ret = H5Dclose(dataset); @@ -2852,7 +2961,7 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -2878,11 +2987,12 @@ test_attr_dense_unlink(hid_t fcpl, hid_t fapl) unsigned min_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? 
*/ size_t mesg_count; /* # of shared messages */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ H5O_info2_t oinfo; /* Object info */ unsigned u; /* Local index variable */ int use_min_dset_oh = (dcpl_g != H5P_DEFAULT); + bool vol_is_native; herr_t ret; /* Generic return value */ /* Only run this test for sec2/default driver */ @@ -2906,12 +3016,17 @@ test_attr_dense_unlink(hid_t fcpl, hid_t fapl) if (use_min_dset_oh) CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -2943,9 +3058,11 @@ test_attr_dense_unlink(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes, until well into dense storage */ for (u = 0; u < (max_compact * 2); u++) { @@ -2968,9 +3085,11 @@ test_attr_dense_unlink(hid_t fcpl, hid_t fapl) VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Close dataspace */ ret = H5Sclose(sid); @@ -2992,16 +3111,18 @@ test_attr_dense_unlink(hid_t fcpl, hid_t fapl) ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); CHECK(ret, FAIL, "H5Ldelete"); - /* Check on dataset's attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -3027,9 +3148,10 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) unsigned min_dense, rmin_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? 
*/ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Phase Change Limits For Attributes in Dense Storage\n")); @@ -3038,14 +3160,19 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -3085,9 +3212,11 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add first attribute, which should be immediately in dense storage */ @@ -3105,9 +3234,11 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Add second attribute, to allow deletions to be checked easily */ @@ -3125,9 +3256,11 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Delete second attribute, attributes should still be stored densely */ @@ -3135,9 +3268,11 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Delete first attribute, attributes should not be stored densely */ @@ -3147,9 +3282,11 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_dense = 
H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Close dataspace */ ret = H5Sclose(sid); @@ -3167,7 +3304,7 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -3199,7 +3336,8 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) unsigned min_dense; /* Minimum # of attributes to store "densely" */ htri_t is_dense; /* Are attributes stored densely? */ unsigned u, i; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing operations with two IDs for Dense Storage\n")); @@ -3211,6 +3349,9 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); @@ -3245,9 +3386,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes, until just before converting to dense storage */ for (u = 0; u < max_compact; u++) { @@ -3265,9 +3408,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); } /* end for */ - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add one more attribute, to push into "dense" storage */ /* Create dataspace for attribute */ @@ -3279,9 +3424,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open the attribute just created and get a second ID */ attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT); @@ -3314,9 +3461,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); CHECK(dataset, FAIL, "H5Dopen2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's 
attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open first attribute for the dataset */ attr = H5Aopen(dataset, attrname, H5P_DEFAULT); @@ -3366,9 +3515,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); CHECK(dataset, FAIL, "H5Dopen2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open first attribute for the dataset */ attr = H5Aopen(dataset, attrname, H5P_DEFAULT); @@ -3429,9 +3580,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); CHECK(dataset, FAIL, "H5Dopen2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open first attribute for the dataset */ attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT); @@ -3483,9 +3636,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); CHECK(dataset, FAIL, "H5Dopen2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open attribute of the dataset for the first time */ attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT); @@ -3497,9 +3652,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Adelete_by_idx"); } - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Open attribute for the second time */ attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT); @@ -3547,9 +3704,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); CHECK(dataset, FAIL, "H5Dopen2"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Open attribute of the dataset for the first time */ attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT); @@ -3571,9 +3730,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); } - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, 
"H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open attribute for the second time */ attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT); @@ -3643,9 +3804,11 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl) attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check on group's attribute storage status */ - is_dense = H5O__is_attr_dense_test(gid1); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on group's attribute storage status */ + is_dense = H5O__is_attr_dense_test(gid1); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Open the hard link just created */ gid2 = H5Gopen2(fid, GROUP2_NAME, H5P_DEFAULT); @@ -3720,9 +3883,10 @@ test_attr_big(hid_t fcpl, hid_t fapl) htri_t is_empty; /* Are there any attributes? */ htri_t is_dense; /* Are attributes stored densely? */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Storing 'Big' Attributes in Dense Storage\n")); @@ -3731,14 +3895,19 @@ test_attr_big(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -3782,11 +3951,13 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add first "small" attribute, which should be in compact storage */ @@ -3800,11 +3971,13 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, 
"H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add second "small" attribute, which should stay in compact storage */ @@ -3818,11 +3991,13 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add first "big" attribute, which should push storage into dense form */ @@ -3842,10 +4017,12 @@ test_attr_big(hid_t fcpl, hid_t fapl) * message heap instead of forcing the attribute storage into the dense * form - QAK) */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); + if (vol_is_native) { + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); + } /* Add second "big" attribute, which should leave storage in dense form */ @@ -3864,10 +4041,12 @@ test_attr_big(hid_t fcpl, hid_t fapl) * message heap instead of forcing the attribute storage into the dense * form - QAK) */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); + if (vol_is_native) { + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); + } /* Delete second "small" attribute, attributes should still be stored densely */ @@ -3877,25 +4056,29 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); - - /* Delete second "big" attribute, attributes should still be stored densely */ - + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? 
false : true), "H5O__is_attr_dense_test"); + } + + /* Delete second "big" attribute, attributes should still be stored densely */ + /* Delete attribute */ u = 3; snprintf(attrname, sizeof(attrname), "attr %02u", u); ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, (nshared_indices ? false : true), "H5O__is_attr_dense_test"); + } /* Delete first "big" attribute, attributes should _not_ be stored densely */ @@ -3905,11 +4088,13 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Delete first "small" attribute, should be no attributes now */ @@ -3919,11 +4104,13 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Adelete(dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + } } /* end if */ - else { + else if (vol_is_native) { /* Shouldn't be able to create "big" attributes with older version of format */ VERIFY(attr, FAIL, "H5Acreate2"); @@ -3956,7 +4143,7 @@ test_attr_big(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -3981,11 +4168,12 @@ test_attr_null_space(hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned value; /* Attribute value */ htri_t cmp; /* Results of comparison */ - hsize_t storage_size; /* Size of storage for attribute */ + hsize_t storage_size = 0; /* Size of storage for attribute */ H5A_info_t ainfo; /* Attribute info */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Storing Attributes with 'null' dataspace\n")); @@ -3994,14 +4182,19 @@ test_attr_null_space(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, 
"H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -4046,14 +4239,19 @@ test_attr_null_space(hid_t fcpl, hid_t fapl) ret = H5Sclose(attr_sid); CHECK(ret, FAIL, "H5Sclose"); - /* Check the storage size for the attribute */ - storage_size = H5Aget_storage_size(attr); - VERIFY(storage_size, 0, "H5Aget_storage_size"); + if (vol_is_native) { + /* Check the storage size for the attribute */ + storage_size = H5Aget_storage_size(attr); + VERIFY(storage_size, 0, "H5Aget_storage_size"); + } /* Get the attribute info */ ret = H5Aget_info(attr, &ainfo); CHECK(ret, FAIL, "H5Aget_info"); - VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); + + if (vol_is_native) { + VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); + } /* Close attribute */ ret = H5Aclose(attr); @@ -4120,14 +4318,19 @@ test_attr_null_space(hid_t fcpl, hid_t fapl) ret = H5Sclose(attr_sid); CHECK(ret, FAIL, "H5Sclose"); - /* Check the storage size for the attribute */ - storage_size = H5Aget_storage_size(attr); - VERIFY(storage_size, 0, "H5Aget_storage_size"); + if (vol_is_native) { + /* Check the storage size for the attribute */ + storage_size = H5Aget_storage_size(attr); + VERIFY(storage_size, 0, "H5Aget_storage_size"); + } /* Get the attribute info */ ret = H5Aget_info(attr, &ainfo); CHECK(ret, FAIL, "H5Aget_info"); - VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); + + if (vol_is_native) { + VERIFY(ainfo.data_size, storage_size, "H5Aget_info"); + } /* Close attribute */ ret = H5Aclose(attr); @@ -4167,7 +4370,7 @@ test_attr_null_space(hid_t fcpl, hid_t fapl) ret = H5Sclose(null_sid); CHECK(ret, FAIL, "H5Sclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -4424,7 +4627,8 @@ test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) unsigned crt_order_flags; /* Creation order flags */ htri_t is_empty; /* Are there any attributes? */ htri_t is_dense; /* Are attributes stored densely? 
*/ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Basic Code for Attributes with Creation Order Info\n")); @@ -4433,6 +4637,9 @@ test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataset creation property list */ if (dcpl_g == H5P_DEFAULT) { dcpl = H5Pcreate(H5P_DATASET_CREATE); @@ -4478,11 +4685,13 @@ test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) ret = H5Sclose(sid); CHECK(ret, FAIL, "H5Sclose"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Close Dataset */ ret = H5Dclose(dataset); @@ -4504,11 +4713,13 @@ test_attr_corder_create_basic(hid_t fcpl, hid_t fapl) dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT); CHECK(dataset, FAIL, "H5Dopen2"); - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Retrieve dataset creation property list for group */ dcpl = H5Dget_create_plist(dataset); @@ -4555,7 +4766,8 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Compact Storage of Attributes with Creation Order Info\n")); @@ -4564,6 +4776,9 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataset creation property list */ if (dcpl_g == H5P_DEFAULT) { dcpl = H5Pcreate(H5P_DATASET_CREATE); @@ -4613,11 +4828,13 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, 
"H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Create several attributes, but keep storage in compact form */ for (u = 0; u < max_compact; u++) { @@ -4634,14 +4851,16 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } } /* end for */ } /* end for */ @@ -4696,14 +4915,16 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Loop through attributes, checking their creation order values */ /* (the name index is used, but the creation order value is in the same order) */ @@ -4759,7 +4980,8 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Dense Storage of Attributes with Creation Order Info\n")); @@ -4768,6 +4990,9 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataset creation property list */ if (dcpl_g == H5P_DEFAULT) { dcpl = H5Pcreate(H5P_DATASET_CREATE); @@ -4817,11 +5042,13 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, 
"H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Create several attributes, but keep storage in compact form */ for (u = 0; u < max_compact; u++) { @@ -4838,14 +5065,16 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } } /* end for */ /* Create another attribute, to push into dense storage */ @@ -4861,19 +5090,21 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } } /* end for */ /* Close Datasets */ @@ -4927,14 +5158,16 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + ret = 
H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Loop through attributes, checking their creation order values */ /* (the name index is used, but the creation order value is in the same order) */ @@ -5096,7 +5329,8 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Storage Transitions of Attributes with Creation Order Info\n")); @@ -5105,6 +5339,9 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataset creation property list */ if (dcpl_g == H5P_DEFAULT) { dcpl = H5Pcreate(H5P_DATASET_CREATE); @@ -5158,11 +5395,13 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } } /* end for */ /* Close Datasets */ @@ -5227,14 +5466,16 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (u + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } } /* end for */ /* Create another attribute, to push into dense storage */ @@ -5250,30 +5491,11 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, 
true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - - /* Delete several attributes from object, until attribute storage resumes compact form */ - for (u = max_compact; u >= min_dense; u--) { - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); - + if (vol_is_native) { /* Verify state of object */ ret = H5O__num_attrs_test(my_dataset, &nattrs); CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, u, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); is_empty = H5O__is_attr_empty_test(my_dataset); VERIFY(is_empty, false, "H5O__is_attr_empty_test"); is_dense = H5O__is_attr_dense_test(my_dataset); @@ -5283,6 +5505,29 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); CHECK(ret, FAIL, "H5O__attr_dense_info_test"); VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } + + /* Delete several attributes from object, until attribute storage resumes compact form */ + for (u = max_compact; u >= min_dense; u--) { + snprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); + + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, u, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } } /* end for */ /* Delete another attribute, to push attribute storage into compact form */ @@ -5290,14 +5535,16 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) ret = H5Adelete(my_dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Re-add attributes to get back into dense form */ for (u = (min_dense - 1); u < (max_compact + 1); u++) { @@ -5315,19 +5562,21 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, 
"H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } } /* end for */ /* Close Datasets */ @@ -5373,30 +5622,11 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - - /* Delete several attributes from object, until attribute storage resumes compact form */ - for (u = max_compact; u >= min_dense; u--) { - snprintf(attrname, sizeof(attrname), "attr %02u", u); - ret = H5Adelete(my_dataset, attrname); - CHECK(ret, FAIL, "H5Adelete"); - - /* Verify state of object */ + if (vol_is_native) { + /* Check on dataset's attribute storage status */ ret = H5O__num_attrs_test(my_dataset, &nattrs); CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, u, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); is_empty = H5O__is_attr_empty_test(my_dataset); VERIFY(is_empty, false, "H5O__is_attr_empty_test"); is_dense = H5O__is_attr_dense_test(my_dataset); @@ -5406,6 +5636,29 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); CHECK(ret, FAIL, "H5O__attr_dense_info_test"); VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } + + /* Delete several attributes from object, until attribute storage resumes compact form */ + for (u = max_compact; u >= min_dense; u--) { + snprintf(attrname, sizeof(attrname), "attr %02u", u); + ret = H5Adelete(my_dataset, attrname); + CHECK(ret, FAIL, "H5Adelete"); + + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, u, "H5O__num_attrs_test"); + is_empty = 
H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } } /* end for */ /* Delete another attribute, to push attribute storage into compact form */ @@ -5413,14 +5666,16 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) ret = H5Adelete(my_dataset, attrname); CHECK(ret, FAIL, "H5Adelete"); - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Re-add attributes to get back into dense form */ for (u = (min_dense - 1); u < (max_compact + 1); u++) { @@ -5438,19 +5693,21 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } /* Delete all attributes */ for (u = max_compact; u > 0; u--) { @@ -5506,7 +5763,8 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Deleting Object 
w/Dense Attribute Storage and Creation Order Info\n")); @@ -5538,7 +5796,10 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) for (reopen_file = false; reopen_file <= true; reopen_file++) { /* Create test file */ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fopen"); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); /* Create datasets */ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); @@ -5567,11 +5828,13 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Create attributes, until attribute storage is in dense form */ for (u = 0; u < max_compact * 2; u++) { @@ -5589,19 +5852,21 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + } } /* end for */ /* Close Datasets */ @@ -5706,22 +5971,32 @@ attr_info_by_idx_check(hid_t obj_id, const char *attrname, hsize_t n, bool use_i * index. 
*/ if (use_index) { + H5_iter_order_t order; + bool vol_is_native; + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, obj_id, &vol_is_native), FAIL, "h5_using_native_vol"); + + if (vol_is_native) + order = H5_ITER_NATIVE; + else + order = H5_ITER_INC; + /* Verify the information for first attribute, in native creation order */ memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_NATIVE, (hsize_t)0, &ainfo, - H5P_DEFAULT); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, order, (hsize_t)0, &ainfo, H5P_DEFAULT); CHECK(ret, FAIL, "H5Aget_info_by_idx"); VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx"); /* Verify the information for new attribute, in native creation order */ memset(&ainfo, 0, sizeof(ainfo)); - ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_NATIVE, n, &ainfo, H5P_DEFAULT); + ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, order, n, &ainfo, H5P_DEFAULT); CHECK(ret, FAIL, "H5Aget_info_by_idx"); VERIFY(ainfo.corder, n, "H5Aget_info_by_idx"); /* Verify the name for new link, in increasing native order */ memset(tmpname, 0, (size_t)NAME_BUF_SIZE); - ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_NATIVE, n, tmpname, + ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, order, n, tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT); CHECK(ret, FAIL, "H5Aget_name_by_idx"); if (strcmp(attrname, tmpname) != 0) @@ -5827,7 +6102,8 @@ test_attr_info_by_idx(bool new_format, hid_t fcpl, hid_t fapl) char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); @@ -5859,6 +6135,9 @@ test_attr_info_by_idx(bool new_format, hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Set attribute creation order tracking & indexing for object */ if (new_format == true) { ret = H5Pset_attr_creation_order( @@ -5893,11 +6172,13 @@ test_attr_info_by_idx(bool new_format, hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for query on non-existent attribute */ H5E_BEGIN_TRY @@ -5935,14 +6216,16 @@ test_attr_info_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = 
H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for out of bound offset queries */ H5E_BEGIN_TRY @@ -5982,32 +6265,36 @@ test_attr_info_by_idx(bool new_format, hid_t fcpl, hid_t fapl) ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Verify state of object */ - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + } /* Verify information for new attribute */ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ + } /* Check for out of bound offset queries */ H5E_BEGIN_TRY @@ -6311,10 +6598,16 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ MESSAGE(5, ("Testing Deleting Attribute By Index\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); CHECK(sid, FAIL, "H5Screate"); @@ -6381,6 +6674,9 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Set attribute creation order tracking & indexing for object */ if (new_format == true) { ret = H5Pset_attr_creation_order( @@ -6415,11 +6711,13 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for deleting non-existent attribute */ H5E_BEGIN_TRY @@ -6450,14 +6748,16 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = 
H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for out of bound deletions */ H5E_BEGIN_TRY @@ -6522,9 +6822,11 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); CHECK(ret, FAIL, "H5Adelete_by_idx"); - /* Verify state of attribute storage (empty) */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + if (vol_is_native) { + /* Verify state of attribute storage (empty) */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + } } /* end for */ /* Work on all the datasets */ @@ -6563,33 +6865,35 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Verify state of object */ - if (u >= max_compact) { + if (vol_is_native && (u >= max_compact)) { is_dense = H5O__is_attr_dense_test(my_dataset); VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - } /* end if */ + } /* Verify information for new attribute */ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index); CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ + } /* Check for out of bound deletion */ H5E_BEGIN_TRY @@ -6654,9 +6958,11 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); CHECK(ret, FAIL, "H5Adelete_by_idx"); - /* Verify state of attribute storage (empty) */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + if (vol_is_native) { + /* Verify state of attribute storage (empty) */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + } /* Check for deletion on empty attribute storage again */ H5E_BEGIN_TRY @@ -6705,7 +7011,7 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Verify state of object */ - if (u >= max_compact) { + if (vol_is_native && (u >= max_compact)) { is_dense = H5O__is_attr_dense_test(my_dataset); VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); } /* end if */ @@ -6825,9 +7131,11 @@ test_attr_delete_by_idx(bool new_format, hid_t fcpl, hid_t fapl) ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT); CHECK(ret, FAIL, "H5Adelete_by_idx"); - /* Verify state of attribute storage (empty) */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + if (vol_is_native) { + /* Verify state of attribute storage (empty) */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + } /* Check for deletion on empty attribute storage again */ H5E_BEGIN_TRY @@ -6978,11 +7286,22 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx unsigned oskip; /* # of attributes to skip on object, with H5Aiterate1 */ #endif /* H5_NO_DEPRECATED_SYMBOLS */ int old_nerrs; /* Number of errors when entering this check */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Retrieve the current # of reported errors */ old_nerrs = GetTestNumErrs(); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + SKIPPED(); + printf(" API functions for iterate aren't " + "supported with this connector\n"); + return 1; + } + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, obj_id, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Iterate over attributes on object */ iter_info->nskipped = (unsigned)(skip = 0); iter_info->order = order; @@ -7029,20 +7348,22 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx VERIFY(iter_info->visited[v], true, "H5Aiterate_by_name"); #ifndef H5_NO_DEPRECATED_SYMBOLS - /* Iterate over attributes on object, with H5Aiterate1 */ - iter_info->nskipped = oskip = 0; - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate1"); - - /* Verify that we visited all the attributes */ - VERIFY(skip, max_attrs, "H5Aiterate1"); - for (v = 0; v < max_attrs; v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate1"); + if (vol_is_native) { + /* Iterate over attributes on object, with H5Aiterate1 */ + iter_info->nskipped = oskip = 0; + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); + memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); + ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate1"); + + /* Verify that we visited all the attributes */ + VERIFY(skip, max_attrs, "H5Aiterate1"); + for (v = 0; v < max_attrs; v++) + VERIFY(iter_info->visited[v], true, "H5Aiterate1"); + } #endif /* H5_NO_DEPRECATED_SYMBOLS */ /* Skip over some attributes on object */ @@ -7139,36 +7460,38 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx } /* end else */ #ifndef H5_NO_DEPRECATED_SYMBOLS - /* Skip over some attributes on object, with H5Aiterate1 */ - iter_info->nskipped = oskip = max_attrs / 2; - iter_info->order = order; - iter_info->stop = -1; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 
(unsigned)oskip : ((max_attrs - 1) - oskip); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate1"); - - /* Verify that we visited all the links */ - VERIFY(oskip, max_attrs, "H5Aiterate1"); - if (order == H5_ITER_INC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v + (max_attrs / 2)], true, "H5Aiterate1"); - } /* end if */ - else if (order == H5_ITER_DEC) { - for (v = 0; v < (max_attrs / 2); v++) - VERIFY(iter_info->visited[v], true, "H5Aiterate1"); - } /* end if */ - else { - unsigned nvisit = 0; /* # of links visited */ + if (vol_is_native) { + /* Skip over some attributes on object, with H5Aiterate1 */ + iter_info->nskipped = oskip = max_attrs / 2; + iter_info->order = order; + iter_info->stop = -1; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? (unsigned)oskip : ((max_attrs - 1) - oskip); + memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); + ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate1"); + + /* Verify that we visited all the links */ + VERIFY(oskip, max_attrs, "H5Aiterate1"); + if (order == H5_ITER_INC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v + (max_attrs / 2)], true, "H5Aiterate1"); + } /* end if */ + else if (order == H5_ITER_DEC) { + for (v = 0; v < (max_attrs / 2); v++) + VERIFY(iter_info->visited[v], true, "H5Aiterate1"); + } /* end if */ + else { + unsigned nvisit = 0; /* # of links visited */ - assert(order == H5_ITER_NATIVE); - for (v = 0; v < max_attrs; v++) - if (iter_info->visited[v] == true) - nvisit++; + assert(order == H5_ITER_NATIVE); + for (v = 0; v < max_attrs; v++) + if (iter_info->visited[v] == true) + nvisit++; - VERIFY(nvisit, max_attrs, "H5Aiterate1"); - } /* end else */ + VERIFY(nvisit, max_attrs, "H5Aiterate1"); + } /* end else */ + } #endif /* H5_NO_DEPRECATED_SYMBOLS */ /* Iterate over attributes on object, stopping in the middle */ @@ -7208,17 +7531,19 @@ attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name"); #ifndef H5_NO_DEPRECATED_SYMBOLS - /* Iterate over attributes on object, stopping in the middle, with H5Aiterate1() */ - iter_info->nskipped = oskip = 0; - iter_info->order = order; - iter_info->stop = 3; - iter_info->ncalled = 0; - iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1); - memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); - ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); - CHECK(ret, FAIL, "H5Aiterate1"); - VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate1"); - VERIFY(iter_info->ncalled, 3, "H5Aiterate1"); + if (vol_is_native) { + /* Iterate over attributes on object, stopping in the middle, with H5Aiterate1() */ + iter_info->nskipped = oskip = 0; + iter_info->order = order; + iter_info->stop = 3; + iter_info->ncalled = 0; + iter_info->curr = order != H5_ITER_DEC ? 
0 : (max_attrs - 1); + memset(iter_info->visited, 0, sizeof(bool) * iter_info->max_visit); + ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info); + CHECK(ret, FAIL, "H5Aiterate1"); + VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate1"); + VERIFY(iter_info->ncalled, 3, "H5Aiterate1"); + } #endif /* H5_NO_DEPRECATED_SYMBOLS */ /* Check for iteration routine indicating failure */ @@ -7287,7 +7612,11 @@ test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ + + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) + return; /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); @@ -7361,6 +7690,9 @@ test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Set attribute creation order tracking & indexing for object */ if (new_format == true) { ret = H5Pset_attr_creation_order( @@ -7398,11 +7730,13 @@ test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for iterating over object with no attributes (should be OK) */ ret = H5Aiterate2(my_dataset, idx_type, order, NULL, attr_iterate2_cb, NULL); @@ -7437,45 +7771,49 @@ test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - - /* Check for out of bound iteration */ - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); - - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, - H5P_DEFAULT); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, 
&idx, attr_iterate2_cb, - NULL, H5P_DEFAULT); + if (vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) { + /* Check for out of bound iteration */ + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Aiterate2"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + /* Test iteration over attributes stored compactly */ + ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); + CHECK(ret, FAIL, "attr_iterate_check"); } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - /* Test iteration over attributes stored compactly */ - ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); - CHECK(ret, FAIL, "attr_iterate_check"); } /* end for */ /* Work on all the datasets */ @@ -7517,7 +7855,7 @@ test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Verify state of object */ - if (u >= max_compact) { + if (vol_is_native && (u >= max_compact)) { is_dense = H5O__is_attr_dense_test(my_dataset); VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); } /* end if */ @@ -7527,55 +7865,59 @@ test_attr_iterate2(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ - - /* Check for out of bound iteration */ - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate2"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL, - H5P_DEFAULT); + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - idx = u; - H5E_BEGIN_TRY - { - ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, - NULL, H5P_DEFAULT); + if (vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) { + /* Check for out of bound iteration */ + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Aiterate2"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + idx = u; + H5E_BEGIN_TRY + { + ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb, + NULL, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Aiterate_by_name"); + + /* Test iteration over attributes stored densely */ + ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); + CHECK(ret, FAIL, "attr_iterate_check"); } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Aiterate_by_name"); - - /* Test iteration over attributes stored densely */ - ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info); - CHECK(ret, FAIL, "attr_iterate_check"); - } /* end for */ + } /* Close Datasets */ ret = H5Dclose(dset1); @@ -7687,8 +8029,12 @@ test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + hid_t ret_id; /* Generic hid_t return value */ + herr_t ret; /* Generic return value */ + + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) + return; /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); @@ -7756,6 +8102,9 @@ test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Set attribute creation order tracking & indexing for object */ if (new_format == true) { ret = H5Pset_attr_creation_order( @@ -7790,11 +8139,13 @@ test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, 
false, "H5O__is_attr_dense_test"); + } /* Check for opening an attribute on an object with no attributes */ H5E_BEGIN_TRY @@ -7826,14 +8177,16 @@ test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for out of bound opening an attribute on an object */ H5E_BEGIN_TRY @@ -7885,7 +8238,7 @@ test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Verify state of object */ - if (u >= max_compact) { + if (vol_is_native && (u >= max_compact)) { is_dense = H5O__is_attr_dense_test(my_dataset); VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); } /* end if */ @@ -7895,23 +8248,25 @@ test_attr_open_by_idx(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ + } /* Check for out of bound opening an attribute on an object */ H5E_BEGIN_TRY @@ -8057,8 +8412,9 @@ test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - hid_t ret_id; /* Generic hid_t return value */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + hid_t ret_id; /* Generic hid_t return value */ + herr_t ret; /* Generic return value */ /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); @@ -8090,6 +8446,9 @@ test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Set attribute creation order tracking & indexing for object */ if (new_format == true) { ret = H5Pset_attr_creation_order( @@ -8127,11 +8486,13 @@ test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Check for opening a non-existent attribute on an object with no attributes */ H5E_BEGIN_TRY @@ -8175,14 +8536,16 @@ test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, 
"H5O__is_attr_dense_test"); + } /* Check for opening a non-existent attribute on an object with compact attribute storage */ H5E_BEGIN_TRY @@ -8249,7 +8612,7 @@ test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Verify state of object */ - if (u >= max_compact) { + if (vol_is_native && (u >= max_compact)) { is_dense = H5O__is_attr_dense_test(my_dataset); VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); } /* end if */ @@ -8259,23 +8622,25 @@ test_attr_open_by_name(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ + } /* Check for opening a non-existent attribute on an object with dense attribute storage */ H5E_BEGIN_TRY @@ -8353,7 +8718,8 @@ test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) char attrname[NAME_BUF_SIZE]; /* Name of attribute */ unsigned curr_dset; /* Current dataset to work on */ unsigned u; /* Local index variable */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Create dataspace for dataset & attributes */ sid = H5Screate(H5S_SCALAR); @@ -8385,6 +8751,9 @@ test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Set attribute creation order tracking & indexing for object */ if (new_format == true) { ret = H5Pset_attr_creation_order( @@ -8422,11 +8791,13 @@ test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) assert(0 && "Too many datasets!"); } /* end switch */ - /* Check on dataset's attribute storage status */ - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, true, "H5O__is_attr_empty_test"); - is_dense = 
H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, true, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Create attributes, up to limit of compact form */ for (u = 0; u < max_compact; u++) { @@ -8449,14 +8820,16 @@ test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, max_compact, "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Test opening attributes stored compactly */ ret = attr_open_check(fid, dsetname, my_dataset, u); @@ -8502,7 +8875,7 @@ test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Aclose"); /* Verify state of object */ - if (u >= max_compact) { + if (vol_is_native && (u >= max_compact)) { is_dense = H5O__is_attr_dense_test(my_dataset); VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); } /* end if */ @@ -8512,23 +8885,25 @@ test_attr_create_by_name(bool new_format, hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "attr_info_by_idx_check"); } /* end for */ - /* Verify state of object */ - ret = H5O__num_attrs_test(my_dataset, &nattrs); - CHECK(ret, FAIL, "H5O__num_attrs_test"); - VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); - is_empty = H5O__is_attr_empty_test(my_dataset); - VERIFY(is_empty, false, "H5O__is_attr_empty_test"); - is_dense = H5O__is_attr_dense_test(my_dataset); - VERIFY(is_dense, (new_format ? true : false), "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Verify state of object */ + ret = H5O__num_attrs_test(my_dataset, &nattrs); + CHECK(ret, FAIL, "H5O__num_attrs_test"); + VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test"); + is_empty = H5O__is_attr_empty_test(my_dataset); + VERIFY(is_empty, false, "H5O__is_attr_empty_test"); + is_dense = H5O__is_attr_dense_test(my_dataset); + VERIFY(is_dense, (new_format ? 
true : false), "H5O__is_attr_dense_test"); - if (new_format) { - /* Retrieve & verify # of records in the name & creation order indices */ - ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); - CHECK(ret, FAIL, "H5O__attr_dense_info_test"); - if (use_index) - VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); - VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); - } /* end if */ + if (new_format) { + /* Retrieve & verify # of records in the name & creation order indices */ + ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count); + CHECK(ret, FAIL, "H5O__attr_dense_info_test"); + if (use_index) + VERIFY(name_count, corder_count, "H5O__attr_dense_info_test"); + VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test"); + } /* end if */ + } /* Test opening attributes stored compactly */ ret = attr_open_check(fid, dsetname, my_dataset, u); @@ -8585,9 +8960,10 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) size_t mesg_count; /* # of shared messages */ unsigned test_shared; /* Index over shared component type */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Writing Shared & Unshared Attributes in Compact & Dense Storage\n")); @@ -8644,18 +9020,23 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close FCPL copy */ ret = H5Pclose(my_fcpl); CHECK(ret, FAIL, "H5Pclose"); /* Close file */ ret = H5Fclose(fid); - CHECK(ret, FAIL, "H5Fclose"); - - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + CHECK(ret, FAIL, "H5Fclose"); + + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -8684,7 +9065,7 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) CHECK(dataset2, FAIL, "H5Dcreate2"); /* Check on dataset's message storage status */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { /* Datasets' datatypes can be shared */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); @@ -8704,11 +9085,13 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes to each dataset, until after 
converting to dense storage */ for (u = 0; u < max_compact * 2; u++) { @@ -8721,9 +9104,11 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -8735,36 +9120,42 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Alternate between creating "small" & "big" attributes */ if (u % 2) { @@ -8772,9 +9163,11 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -8786,36 +9179,42 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if 
(vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } } /* end for */ /* Close attribute's datatype */ @@ -8829,7 +9228,7 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Dclose"); /* Check on shared message status now */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { if (test_shared == 1) { /* Check on datatype storage status */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); @@ -8841,7 +9240,7 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test"); - } /* end if */ + } /* Unlink datasets with attributes */ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT); @@ -8855,28 +9254,30 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Ldelete"); } /* end if */ - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + if (vol_is_native) { + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, 
FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + } /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -8922,9 +9323,10 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) size_t mesg_count; /* # of shared messages */ unsigned test_shared; /* Index over shared component type */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Renaming Shared & Unshared Attributes in Compact & Dense Storage\n")); @@ -8981,6 +9383,9 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close FCPL copy */ ret = H5Pclose(my_fcpl); CHECK(ret, FAIL, "H5Pclose"); @@ -8989,10 +9394,12 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -9021,7 +9428,7 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) CHECK(dataset2, FAIL, "H5Dcreate2"); /* Check on dataset's message storage status */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { /* Datasets' datatypes can be shared */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); @@ -9041,11 +9448,13 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes to each dataset, until after converting to dense storage */ for (u = 0; u < max_compact * 2; u++) { @@ -9058,9 +9467,11 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that 
attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -9072,36 +9483,42 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Alternate between creating "small" & "big" attributes */ if (u % 2) { @@ -9109,9 +9526,11 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -9123,36 +9542,42 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, 
"H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Create new attribute name */ snprintf(attrname2, sizeof(attrname2), "new attr %02u", u); @@ -9167,21 +9592,23 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Aopen(dataset2, attrname2, H5P_DEFAULT); CHECK(attr, FAIL, "H5Aopen"); - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ + if (vol_is_native) { + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ + } /* Close attribute */ ret = H5Aclose(attr); @@ -9191,21 +9618,23 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Aopen(dataset, attrname, H5P_DEFAULT); CHECK(attr, FAIL, "H5Aopen"); - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ + if (vol_is_native) { + if (u % 2) { + /* Check that attribute is not shared */ 
+ is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ + } /* Close attribute */ ret = H5Aclose(attr); @@ -9221,21 +9650,23 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Aopen(dataset2, attrname, H5P_DEFAULT); CHECK(attr, FAIL, "H5Aopen"); - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); - } /* end else */ + if (vol_is_native) { + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } /* end else */ + } /* Close attribute */ ret = H5Aclose(attr); @@ -9245,21 +9676,23 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) attr = H5Aopen(dataset, attrname, H5P_DEFAULT); CHECK(attr, FAIL, "H5Aopen"); - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); - } /* end else */ + if (vol_is_native) { + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } /* end else */ + } /* Close attribute */ ret = H5Aclose(attr); @@ -9277,7 +9710,7 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Dclose"); /* Check on shared message status now */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { if (test_shared == 1) { /* Check on datatype storage status */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); @@ -9303,28 +9736,30 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Ldelete"); } /* end if */ - /* Check on attribute storage 
status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + if (vol_is_native) { + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + } /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -9369,9 +9804,10 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) size_t mesg_count; /* # of shared messages */ unsigned test_shared; /* Index over shared component type */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Deleting Shared & Unshared Attributes in Compact & Dense Storage\n")); @@ -9428,6 +9864,9 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close FCPL copy */ ret = H5Pclose(my_fcpl); CHECK(ret, FAIL, "H5Pclose"); @@ -9436,10 +9875,12 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -9468,7 +9909,7 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) CHECK(dataset2, FAIL, "H5Dcreate2"); /* Check on dataset's message storage status */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { /* Datasets' datatypes can be shared */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); @@ -9488,11 
+9929,13 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes to each dataset, until after converting to dense storage */ for (u = 0; u < max_compact * 2; u++) { @@ -9505,9 +9948,11 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -9519,36 +9964,42 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Alternate between creating "small" & "big" attributes */ if (u % 2) { @@ -9556,9 +10007,11 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, 
FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -9570,36 +10023,42 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } } /* end for */ /* Delete attributes from second dataset */ @@ -9617,21 +10076,23 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) attr = H5Aopen(dataset, attrname, H5P_DEFAULT); CHECK(attr, FAIL, "H5Aopen"); - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ + if (vol_is_native) { + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); 
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ + } /* Close attribute */ ret = H5Aclose(attr); @@ -9649,7 +10110,7 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Dclose"); /* Check on shared message status now */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { if (test_shared == 1) { /* Check on datatype storage status */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); @@ -9675,28 +10136,30 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Ldelete"); } /* end if */ - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + if (vol_is_native) { + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + } /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -9741,9 +10204,10 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) size_t mesg_count; /* # of shared messages */ unsigned test_shared; /* Index over shared component type */ unsigned u; /* Local index variable */ - h5_stat_size_t empty_filesize; /* Size of empty file */ + h5_stat_size_t empty_filesize = 0; /* Size of empty file */ h5_stat_size_t filesize; /* Size of file after modifications */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Unlinking Object with Shared Attributes in Compact & Dense Storage\n")); @@ -9800,6 +10264,9 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close FCPL copy */ ret = H5Pclose(my_fcpl); CHECK(ret, FAIL, "H5Pclose"); @@ -9808,10 +10275,12 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - /* Get size of file */ - empty_filesize = h5_get_file_size(FILENAME, fapl); - if (empty_filesize < 0) - TestErrPrintf("Line %d: file size 
wrong!\n", __LINE__); + if (vol_is_native) { + /* Get size of file */ + empty_filesize = h5_get_file_size(FILENAME, fapl); + if (empty_filesize < 0) + TestErrPrintf("Line %d: file size wrong!\n", __LINE__); + } /* Re-open file */ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl); @@ -9840,7 +10309,7 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) CHECK(dataset2, FAIL, "H5Dcreate2"); /* Check on dataset's message storage status */ - if (test_shared != 0) { + if (vol_is_native && (test_shared != 0)) { /* Datasets' datatypes can be shared */ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); @@ -9860,11 +10329,13 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) ret = H5Pclose(dcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Check on datasets' attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - is_dense = H5O__is_attr_dense_test(dataset2); - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on datasets' attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + is_dense = H5O__is_attr_dense_test(dataset2); + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + } /* Add attributes to each dataset, until after converting to dense storage */ for (u = 0; u < max_compact * 2; u++) { @@ -9877,9 +10348,11 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -9891,36 +10364,42 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* ChecFk that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* ChecFk that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - 
is_dense = H5O__is_attr_dense_test(dataset); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Alternate between creating "small" & "big" attributes */ if (u % 2) { @@ -9928,9 +10407,11 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* Write data into the attribute */ attr_value = u + 1; @@ -9942,36 +10423,42 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, FAIL, "H5Acreate2"); - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); + if (vol_is_native) { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* Write data into the attribute */ big_value[0] = u + 1; ret = H5Awrite(attr, attr_tid, big_value); CHECK(ret, FAIL, "H5Awrite"); - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + if (vol_is_native) { + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test"); + } } /* end else */ /* Close attribute */ ret = H5Aclose(attr); CHECK(ret, FAIL, "H5Aclose"); - /* Check on dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset2); - if (u < max_compact) - VERIFY(is_dense, false, "H5O__is_attr_dense_test"); - else - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on dataset's attribute storage status */ + is_dense = H5O__is_attr_dense_test(dataset2); + if (u < max_compact) + VERIFY(is_dense, false, "H5O__is_attr_dense_test"); + else + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } } /* end for */ /* Close attribute's datatype */ @@ -9986,9 +10473,11 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT); CHECK(ret, FAIL, "H5Ldelete"); - /* Check on first dataset's attribute storage status */ - is_dense = H5O__is_attr_dense_test(dataset); - VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + if (vol_is_native) { + /* Check on first dataset's attribute storage status */ + is_dense = 
H5O__is_attr_dense_test(dataset); + VERIFY(is_dense, true, "H5O__is_attr_dense_test"); + } /* Check ref count on attributes of first dataset */ for (u = 0; u < max_compact * 2; u++) { @@ -9999,21 +10488,23 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) attr = H5Aopen(dataset, attrname, H5P_DEFAULT); CHECK(attr, FAIL, "H5Aopen"); - if (u % 2) { - /* Check that attribute is not shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, false, "H5A__is_shared_test"); - } /* end if */ - else { - /* Check that attribute is shared */ - is_shared = H5A__is_shared_test(attr); - VERIFY(is_shared, true, "H5A__is_shared_test"); - - /* Check refcount for attribute */ - ret = H5A__get_shared_rc_test(attr, &shared_refcount); - CHECK(ret, FAIL, "H5A__get_shared_rc_test"); - VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); - } /* end else */ + if (vol_is_native) { + if (u % 2) { + /* Check that attribute is not shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, false, "H5A__is_shared_test"); + } /* end if */ + else { + /* Check that attribute is shared */ + is_shared = H5A__is_shared_test(attr); + VERIFY(is_shared, true, "H5A__is_shared_test"); + + /* Check refcount for attribute */ + ret = H5A__get_shared_rc_test(attr, &shared_refcount); + CHECK(ret, FAIL, "H5A__get_shared_rc_test"); + VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test"); + } /* end else */ + } /* Close attribute */ ret = H5Aclose(attr); @@ -10034,28 +10525,30 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl) CHECK(ret, FAIL, "H5Ldelete"); } /* end if */ - /* Check on attribute storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - - if (test_shared != 0) { - /* Check on datatype storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + if (vol_is_native) { + /* Check on attribute storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count); CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - /* Check on dataspace storage status */ - ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); - CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); - VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); - } /* end if */ + if (test_shared != 0) { + /* Check on datatype storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + + /* Check on dataspace storage status */ + ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count); + CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test"); + VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test"); + } /* end if */ + } /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); - if (h5_using_default_driver(NULL)) { + if (vol_is_native && h5_using_default_driver(NULL)) { /* Check size of file */ filesize = h5_get_file_size(FILENAME, fapl); VERIFY(filesize, empty_filesize, "h5_get_file_size"); @@ -10825,16 +11318,26 @@ test_attr_bug7(hid_t fcpl, hid_t fapl) hsize_t dims_s = 140; /* Small attribute dimensions */ hsize_t dims_l = 65480; /* Large attribute dimensions */ H5A_info_t ainfo; /* Attribute info */ - herr_t ret; /* Generic return status */ + bool vol_is_native; + herr_t ret; /* Generic return status */ /* Output message about 
test being performed */ MESSAGE(5, ("Testing adding and deleting large attributes\n")); + fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(fid, FAIL, "H5Fcreate"); + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create committed datatype to operate on. Use a committed datatype so that * there is nothing after the object header and the first chunk can expand and * contract as necessary. */ - fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(fid, FAIL, "H5Fcreate"); tid = H5Tcopy(H5T_STD_I32LE); CHECK(tid, FAIL, "H5Tcopy"); ret = H5Tcommit2(fid, TYPE1_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -11337,7 +11840,8 @@ test_attr_delete_last_dense(hid_t fcpl, hid_t fapl) hsize_t dim2[2] = {DIM0, DIM1}; /* Dimension sizes */ int i, j; /* Local index variables */ double *data = NULL; /* Pointer to the data buffer */ - herr_t ret; /* Generic return status */ + bool vol_is_native; + herr_t ret; /* Generic return status */ /* Output message about test being performed */ MESSAGE(5, ("Testing Deleting the last large attribute stored densely\n")); @@ -11346,6 +11850,14 @@ test_attr_delete_last_dense(hid_t fcpl, hid_t fapl) fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create the group */ gid = H5Gcreate2(fid, GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(gid, FAIL, "H5Gcreate"); diff --git a/test/testhdf5.c b/test/testhdf5.c index 93da1d8ded5..660fee9e2cd 100644 --- a/test/testhdf5.c +++ b/test/testhdf5.c @@ -37,6 +37,15 @@ int main(int argc, char *argv[]) { + hid_t fapl_id = H5I_INVALID_HID; + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate"); + + CHECK(H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g), FAIL, "H5Pget_vol_cap_flags"); + + H5Pclose(fapl_id); + /* Initialize testing framework */ TestInit(argv[0], NULL, NULL); diff --git a/test/tfile.c b/test/tfile.c index 24cc7ce000e..0f5bbd38e70 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -233,6 +233,11 @@ test_file_create(void) /* Output message about test being performed */ MESSAGE(5, ("Testing Low-Level File Creation I/O\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* First ensure the file does not exist */ H5E_BEGIN_TRY { @@ -540,6 +545,7 @@ test_file_open(const char *env_h5_drvr) unsigned iparm; unsigned iparm2; unsigned intent; + bool vol_is_native; herr_t ret; /*generic return value */ /* @@ -557,6 +563,9 @@ test_file_open(const char *env_h5_drvr) fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); CHECK(fid1, FAIL, "H5Fopen"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Get the intent */ ret = H5Fget_intent(fid1, &intent); CHECK(ret, FAIL, "H5Fget_intent"); @@ -598,57 +607,59 @@ test_file_open(const char *env_h5_drvr) /* Output message about test being performed */ MESSAGE(5, ("Testing 2 File Openings\n")); - /* Create file access property list */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - CHECK(fapl_id, FAIL, "H5Pcreate"); + 
if (vol_is_native) { + /* Create file access property list */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl_id, FAIL, "H5Pcreate"); - /* Set file close mode to H5F_CLOSE_WEAK */ - ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); - CHECK(ret, FAIL, "H5Pset_fclose_degree"); + /* Set file close mode to H5F_CLOSE_WEAK */ + ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK); + CHECK(ret, FAIL, "H5Pset_fclose_degree"); - /* Open file for first time */ - fid1 = H5Fopen(FILE2, H5F_ACC_RDONLY, fapl_id); - CHECK(fid1, FAIL, "H5Fopen"); + /* Open file for first time */ + fid1 = H5Fopen(FILE2, H5F_ACC_RDONLY, fapl_id); + CHECK(fid1, FAIL, "H5Fopen"); - /* Check the intent */ - ret = H5Fget_intent(fid1, &intent); - CHECK(ret, FAIL, "H5Fget_intent"); - VERIFY(intent, H5F_ACC_RDONLY, "H5Fget_intent"); + /* Check the intent */ + ret = H5Fget_intent(fid1, &intent); + CHECK(ret, FAIL, "H5Fget_intent"); + VERIFY(intent, H5F_ACC_RDONLY, "H5Fget_intent"); - /* Open dataset */ - did = H5Dopen2(fid1, F2_DSET, H5P_DEFAULT); - CHECK(did, FAIL, "H5Dopen2"); + /* Open dataset */ + did = H5Dopen2(fid1, F2_DSET, H5P_DEFAULT); + CHECK(did, FAIL, "H5Dopen2"); - /* Check that the intent works even if NULL is passed in */ - ret = H5Fget_intent(fid1, NULL); - CHECK(ret, FAIL, "H5Fget_intent"); + /* Check that the intent works even if NULL is passed in */ + ret = H5Fget_intent(fid1, NULL); + CHECK(ret, FAIL, "H5Fget_intent"); - /* Close first open */ - ret = H5Fclose(fid1); - CHECK(ret, FAIL, "H5Fclose"); + /* Close first open */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); - /* Open file for second time, which should fail. */ - H5E_BEGIN_TRY - { - fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id); - } - H5E_END_TRY - VERIFY(fid2, FAIL, "H5Fopen"); + /* Open file for second time, which should fail. 
*/ + H5E_BEGIN_TRY + { + fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id); + } + H5E_END_TRY + VERIFY(fid2, FAIL, "H5Fopen"); - /* Check that the intent fails for an invalid ID */ - H5E_BEGIN_TRY - { - ret = H5Fget_intent(fid1, &intent); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fget_intent"); + /* Check that the intent fails for an invalid ID */ + H5E_BEGIN_TRY + { + ret = H5Fget_intent(fid1, &intent); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Fget_intent"); - /* Close dataset from first open */ - ret = H5Dclose(did); - CHECK(ret, FAIL, "H5Dclose"); + /* Close dataset from first open */ + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); - ret = H5Pclose(fapl_id); - CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + } } /* test_file_open() */ /**************************************************************** @@ -722,6 +733,7 @@ test_file_close(void) hid_t fapl_id, access_id; hid_t dataset_id, group_id1, group_id2, group_id3; H5F_close_degree_t fc_degree; + bool vol_is_native; herr_t ret; /* Output message about test being performed */ @@ -733,6 +745,14 @@ test_file_close(void) fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid1, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid1), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + fapl_id = H5Pcreate(H5P_FILE_ACCESS); CHECK(fapl_id, FAIL, "H5Pcreate"); @@ -1162,6 +1182,118 @@ create_objects(hid_t fid1, hid_t fid2, hid_t *ret_did, hid_t *ret_gid1, hid_t *r } } +/**************************************************************** +** +** test_obj_count_and_id(): test object count and ID list functions. 
+** +****************************************************************/ +static void +test_obj_count_and_id(hid_t fid1, hid_t fid2, hid_t did, hid_t gid1, hid_t gid2, hid_t gid3) +{ + hid_t fid3, fid4; + ssize_t oid_count, ret_count; + herr_t ret; + + /* Create two new files */ + fid3 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid3, FAIL, "H5Fcreate"); + fid4 = H5Fcreate(FILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid4, FAIL, "H5Fcreate"); + + /* test object count of all files IDs open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); + + /* test object count of all datasets open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_1, "H5Fget_obj_count"); + + /* test object count of all groups open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_3, "H5Fget_obj_count"); + + /* test object count of all named datatypes open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); + + /* test object count of all attributes open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); + + /* test object count of all objects currently open */ + oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL); + CHECK(oid_count, FAIL, "H5Fget_obj_count"); + VERIFY(oid_count, OBJ_ID_COUNT_8, "H5Fget_obj_count"); + + if (oid_count > 0) { + hid_t *oid_list; + + oid_list = (hid_t *)calloc((size_t)oid_count, sizeof(hid_t)); + if (oid_list != NULL) { + int i; + + ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); + CHECK(ret_count, FAIL, "H5Fget_obj_ids"); + + for (i = 0; i < oid_count; i++) { + H5I_type_t id_type; + + id_type = H5Iget_type(oid_list[i]); + switch (id_type) { + case H5I_FILE: + if (oid_list[i] != fid1 && oid_list[i] != fid2 && oid_list[i] != fid3 && + oid_list[i] != fid4) + ERROR("H5Fget_obj_ids"); + break; + + case H5I_GROUP: + if (oid_list[i] != gid1 && oid_list[i] != gid2 && oid_list[i] != gid3) + ERROR("H5Fget_obj_ids"); + break; + + case H5I_DATASET: + VERIFY(oid_list[i], did, "H5Fget_obj_ids"); + break; + + case H5I_MAP: + /* TODO: Not supported in native VOL connector yet */ + + case H5I_UNINIT: + case H5I_BADID: + case H5I_DATATYPE: + case H5I_DATASPACE: + case H5I_ATTR: + case H5I_VFL: + case H5I_VOL: + case H5I_GENPROP_CLS: + case H5I_GENPROP_LST: + case H5I_ERROR_CLASS: + case H5I_ERROR_MSG: + case H5I_ERROR_STACK: + case H5I_SPACE_SEL_ITER: + case H5I_EVENTSET: + case H5I_NTYPES: + default: + ERROR("H5Fget_obj_ids"); + } /* end switch */ + } /* end for */ + + free(oid_list); + } /* end if */ + } /* end if */ + + /* close the two new files */ + ret = H5Fclose(fid3); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Fclose(fid4); + CHECK(ret, FAIL, "H5Fclose"); +} + /**************************************************************** ** ** test_get_obj_ids(): Test the bug and the fix for Jira 8528. 
@@ -1185,6 +1317,11 @@ test_get_obj_ids(void) MESSAGE(5, ("Testing retrieval of object IDs\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create a new file */ fid = H5Fcreate(FILE7, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); @@ -1458,118 +1595,6 @@ check_file_id(hid_t fid, hid_t object_id) CHECK(ret, FAIL, "H5Fclose"); } -/**************************************************************** -** -** test_obj_count_and_id(): test object count and ID list functions. -** -****************************************************************/ -static void -test_obj_count_and_id(hid_t fid1, hid_t fid2, hid_t did, hid_t gid1, hid_t gid2, hid_t gid3) -{ - hid_t fid3, fid4; - ssize_t oid_count, ret_count; - herr_t ret; - - /* Create two new files */ - fid3 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid3, FAIL, "H5Fcreate"); - fid4 = H5Fcreate(FILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid4, FAIL, "H5Fcreate"); - - /* test object count of all files IDs open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count"); - - /* test object count of all datasets open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_1, "H5Fget_obj_count"); - - /* test object count of all groups open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_3, "H5Fget_obj_count"); - - /* test object count of all named datatypes open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); - - /* test object count of all attributes open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count"); - - /* test object count of all objects currently open */ - oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL); - CHECK(oid_count, FAIL, "H5Fget_obj_count"); - VERIFY(oid_count, OBJ_ID_COUNT_8, "H5Fget_obj_count"); - - if (oid_count > 0) { - hid_t *oid_list; - - oid_list = (hid_t *)calloc((size_t)oid_count, sizeof(hid_t)); - if (oid_list != NULL) { - int i; - - ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list); - CHECK(ret_count, FAIL, "H5Fget_obj_ids"); - - for (i = 0; i < oid_count; i++) { - H5I_type_t id_type; - - id_type = H5Iget_type(oid_list[i]); - switch (id_type) { - case H5I_FILE: - if (oid_list[i] != fid1 && oid_list[i] != fid2 && oid_list[i] != fid3 && - oid_list[i] != fid4) - ERROR("H5Fget_obj_ids"); - break; - - case H5I_GROUP: - if (oid_list[i] != gid1 && oid_list[i] != gid2 && oid_list[i] != gid3) - ERROR("H5Fget_obj_ids"); - break; - - case H5I_DATASET: - VERIFY(oid_list[i], did, "H5Fget_obj_ids"); - break; - - case H5I_MAP: - /* TODO: Not supported in native VOL connector yet */ - - case H5I_UNINIT: - case H5I_BADID: - case H5I_DATATYPE: - case H5I_DATASPACE: - case H5I_ATTR: - case H5I_VFL: - case H5I_VOL: - case H5I_GENPROP_CLS: - case H5I_GENPROP_LST: - case H5I_ERROR_CLASS: - case H5I_ERROR_MSG: - case H5I_ERROR_STACK: - case H5I_SPACE_SEL_ITER: - case H5I_EVENTSET: - case H5I_NTYPES: - default: - 
ERROR("H5Fget_obj_ids"); - } /* end switch */ - } /* end for */ - - free(oid_list); - } /* end if */ - } /* end if */ - - /* close the two new files */ - ret = H5Fclose(fid3); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Fclose(fid4); - CHECK(ret, FAIL, "H5Fclose"); -} - /**************************************************************** ** ** test_file_perm(): low-level file test routine. @@ -1736,6 +1761,7 @@ test_file_is_accessible(const char *env_h5_drvr) unsigned char buf[1024]; /* Buffer of data to write */ htri_t is_hdf5; /* Whether a file is an HDF5 file */ int posix_ret; /* Return value from POSIX calls */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Return value from HDF5 calls */ @@ -1765,6 +1791,9 @@ test_file_is_accessible(const char *env_h5_drvr) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl_id, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); @@ -1828,7 +1857,7 @@ test_file_is_accessible(const char *env_h5_drvr) VERIFY(is_hdf5, true, "H5Fis_accessible"); } /* end if */ - if (driver_is_default_compatible) { + if (vol_is_native && driver_is_default_compatible) { /***********************/ /* EMPTY non-HDF5 file */ /***********************/ @@ -1902,7 +1931,8 @@ test_file_ishdf5(const char *env_h5_drvr) unsigned char buf[1024]; /* Buffer of data to write */ htri_t is_hdf5; /* Whether a file is an HDF5 file */ int posix_ret; /* Return value from POSIX calls */ - herr_t ret; /* Return value from HDF5 calls */ + bool vol_is_native; + herr_t ret; /* Return value from HDF5 calls */ if (!h5_using_default_driver(env_h5_drvr)) return; @@ -1930,6 +1960,15 @@ test_file_ishdf5(const char *env_h5_drvr) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl_id, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Pclose(fapl_id), FAIL, "H5Pclose"); + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Close file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); @@ -2017,6 +2056,7 @@ test_file_delete(hid_t fapl_id) htri_t is_hdf5; /* Whether a file is an HDF5 file */ int fd; /* POSIX file descriptor */ int iret; + bool vol_is_native; herr_t ret; /* Output message about test being performed */ @@ -2033,6 +2073,9 @@ test_file_delete(hid_t fapl_id) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl_id, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(fid); VERIFY(ret, SUCCEED, "H5Fclose"); @@ -2054,49 +2097,50 @@ test_file_delete(hid_t fapl_id) H5E_END_TRY VERIFY(is_hdf5, FAIL, "H5Fis_accessible"); - /* Just in case deletion fails - silent on errors */ - h5_delete_test_file(FILE_DELETE, fapl_id); + if (vol_is_native) { + /* Just in case deletion fails - silent on errors */ + h5_delete_test_file(FILE_DELETE, fapl_id); - /*****************/ - /* NON-HDF5 FILE */ - /*****************/ + /*****************/ + /* NON-HDF5 FILE */ + /*****************/ - /* Get fapl-dependent filename */ - h5_fixname(FILE_DELETE_NOT_HDF5, fapl_id, filename, sizeof(filename)); + /* Get fapl-dependent filename */ + 
h5_fixname(FILE_DELETE_NOT_HDF5, fapl_id, filename, sizeof(filename)); - /* Create a non-HDF5 file */ - fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); - CHECK_I(fd, "HDopen"); - - /* Close the file */ - ret = HDclose(fd); - VERIFY(ret, 0, "HDclose"); + /* Create a non-HDF5 file */ + fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW); + CHECK_I(fd, "HDopen"); - /* Verify that the file is not an HDF5 file */ - /* Note that you can get a FAIL result when h5_fixname() - * perturbs the filename as a file with that exact name - * may not have been created since we created it with - * open(2) and not the library. - */ - H5E_BEGIN_TRY - { - is_hdf5 = H5Fis_accessible(filename, fapl_id); - } - H5E_END_TRY - CHECK(is_hdf5, true, "H5Fis_accessible"); + /* Close the file */ + ret = HDclose(fd); + VERIFY(ret, 0, "HDclose"); - /* Try to delete it (should fail) */ - H5E_BEGIN_TRY - { - ret = H5Fdelete(filename, fapl_id); - } - H5E_END_TRY - VERIFY(ret, FAIL, "H5Fdelete"); + /* Verify that the file is not an HDF5 file */ + /* Note that you can get a FAIL result when h5_fixname() + * perturbs the filename as a file with that exact name + * may not have been created since we created it with + * open(2) and not the library. + */ + H5E_BEGIN_TRY + { + is_hdf5 = H5Fis_accessible(filename, fapl_id); + } + H5E_END_TRY + CHECK(is_hdf5, true, "H5Fis_accessible"); - /* Delete the file */ - iret = HDremove(filename); - VERIFY(iret, 0, "HDremove"); + /* Try to delete it (should fail) */ + H5E_BEGIN_TRY + { + ret = H5Fdelete(filename, fapl_id); + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Fdelete"); + /* Delete the file */ + iret = HDremove(filename); + VERIFY(iret, 0, "HDremove"); + } } /* end test_file_delete() */ /**************************************************************** @@ -2263,6 +2307,11 @@ test_file_open_overlap(void) /* Output message about test being performed */ MESSAGE(5, ("Testing opening overlapping file opens\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid1, FAIL, "H5Fcreate"); @@ -2623,6 +2672,11 @@ test_file_double_file_dataset_open(bool new_format) /* Output message about test being performed */ MESSAGE(5, ("Testing double file and dataset open/close\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Setting up test file */ fapl = h5_fileaccess(); CHECK(fapl, FAIL, "H5Pcreate"); @@ -3011,7 +3065,8 @@ test_userblock_file_size(const char *env_h5_drvr) hsize_t dims[2] = {3, 4}; hsize_t filesize1, filesize2, filesize; unsigned long fileno1, fileno2; /* File number */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Don't run with multi/split, family or direct drivers */ if (!strcmp(env_h5_drvr, "multi") || !strcmp(env_h5_drvr, "split") || !strcmp(env_h5_drvr, "family") || @@ -3021,6 +3076,11 @@ test_userblock_file_size(const char *env_h5_drvr) /* Output message about test being performed */ MESSAGE(5, ("Testing file size with user block\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create property list with userblock size set */ fcpl2_id = H5Pcreate(H5P_FILE_CREATE); CHECK(fcpl2_id, FAIL, "H5Pcreate"); @@ -3033,6 +3093,9 @@ test_userblock_file_size(const char *env_h5_drvr) file2_id = H5Fcreate(FILE2, 
H5F_ACC_TRUNC, fcpl2_id, H5P_DEFAULT); CHECK(file2_id, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file1_id, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Check the file numbers */ fileno1 = 0; ret = H5Fget_fileno(file1_id, &fileno1); @@ -3084,15 +3147,17 @@ test_userblock_file_size(const char *env_h5_drvr) file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); CHECK(file2_id, FAIL, "H5Fopen"); - /* Check file sizes */ - ret = H5Fget_filesize(file1_id, &filesize1); - CHECK(ret, FAIL, "H5Fget_filesize"); - ret = H5Fget_filesize(file2_id, &filesize2); - CHECK(ret, FAIL, "H5Fget_filesize"); + if (vol_is_native) { + /* Check file sizes */ + ret = H5Fget_filesize(file1_id, &filesize1); + CHECK(ret, FAIL, "H5Fget_filesize"); + ret = H5Fget_filesize(file2_id, &filesize2); + CHECK(ret, FAIL, "H5Fget_filesize"); - /* Verify that the file sizes differ exactly by the userblock size */ - VERIFY_TYPE((unsigned long long)filesize2, (unsigned long long)(filesize1 + USERBLOCK_SIZE), - unsigned long long, "%llu", "H5Fget_filesize"); + /* Verify that the file sizes differ exactly by the userblock size */ + VERIFY_TYPE((unsigned long long)filesize2, (unsigned long long)(filesize1 + USERBLOCK_SIZE), + unsigned long long, "%llu", "H5Fget_filesize"); + } /* Close files */ ret = H5Fclose(file1_id); @@ -3106,13 +3171,15 @@ test_userblock_file_size(const char *env_h5_drvr) file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT); CHECK(file2_id, FAIL, "H5Fopen"); - /* Verify file sizes did not change */ - ret = H5Fget_filesize(file1_id, &filesize); - CHECK(ret, FAIL, "H5Fget_filesize"); - VERIFY(filesize, filesize1, "H5Fget_filesize"); - ret = H5Fget_filesize(file2_id, &filesize); - CHECK(ret, FAIL, "H5Fget_filesize"); - VERIFY(filesize, filesize2, "H5Fget_filesize"); + if (vol_is_native) { + /* Verify file sizes did not change */ + ret = H5Fget_filesize(file1_id, &filesize); + CHECK(ret, FAIL, "H5Fget_filesize"); + VERIFY(filesize, filesize1, "H5Fget_filesize"); + ret = H5Fget_filesize(file2_id, &filesize); + CHECK(ret, FAIL, "H5Fget_filesize"); + VERIFY(filesize, filesize2, "H5Fget_filesize"); + } /* Close files */ ret = H5Fclose(file1_id); @@ -3135,6 +3202,7 @@ test_cached_stab_info(void) { hid_t file_id; hid_t group_id; + bool vol_is_native; herr_t ret; /* Generic return value */ /* Output message about test being performed */ @@ -3144,6 +3212,14 @@ test_cached_stab_info(void) file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file_id, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file_id), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create group */ group_id = H5Gcreate2(file_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(group_id, FAIL, "H5Gcreate2"); @@ -3229,6 +3305,7 @@ test_rw_noupdate(void) herr_t ret; /* Generic return value */ hid_t fid; /* File ID */ uint32_t chksum1, chksum2; /* Checksum value */ + bool vol_is_native; /* Output message about test being performed */ MESSAGE(5, ("Testing to verify that nothing is written if nothing is changed.\n")); @@ -3237,6 +3314,14 @@ test_rw_noupdate(void) fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if 
(!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Close the file */ ret = H5Fclose(fid); CHECK(ret, FAIL, "H5Fclose"); @@ -3273,12 +3358,20 @@ test_userblock_alignment_helper1(hid_t fcpl, hid_t fapl) { hid_t fid; /* File ID */ int curr_num_errs = GetTestNumErrs(); /* Retrieve the current # of errors */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Create a file with FAPL & FCPL */ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + return 0; + } + /* Only proceed further if file ID is OK */ if (fid > 0) { hid_t gid; /* Group ID */ @@ -3331,12 +3424,20 @@ test_userblock_alignment_helper2(hid_t fapl, bool open_rw) { hid_t fid; /* File ID */ int curr_num_errs = GetTestNumErrs(); /* Retrieve the current # of errors */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Re-open file */ fid = H5Fopen(FILE1, (open_rw ? H5F_ACC_RDWR : H5F_ACC_RDONLY), fapl); CHECK(fid, FAIL, "H5Fopen"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + return 0; + } + /* Only proceed further if file ID is OK */ if (fid > 0) { hid_t gid; /* Group ID */ @@ -4052,7 +4153,8 @@ test_filespace_info(const char *env_h5_drvr) hsize_t fsp_size; /* File space page size */ char filename[FILENAME_LEN]; /* Filename to use */ bool contig_addr_vfd; /* Whether VFD used has a contiguous address space */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing file creation public routines: H5Pget/set_file_space_strategy & " @@ -4063,6 +4165,13 @@ test_filespace_info(const char *env_h5_drvr) fapl = h5_fileaccess(); h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Get a copy of the file access property list */ new_fapl = H5Pcopy(fapl); CHECK(new_fapl, FAIL, "H5Pcopy"); @@ -4465,143 +4574,151 @@ test_file_freespace(const char *env_h5_drvr) bool split_vfd, multi_vfd; /* Indicate multi/split driver */ hsize_t expected_freespace; /* Freespace expected */ hsize_t expected_fs_del; /* Freespace expected after delete */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ split_vfd = !strcmp(env_h5_drvr, "split"); multi_vfd = !strcmp(env_h5_drvr, "multi"); - if (!split_vfd && !multi_vfd) { - fapl = h5_fileaccess(); - h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); - - new_fapl = H5Pcopy(fapl); - CHECK(new_fapl, FAIL, "H5Pcopy"); + if (split_vfd || multi_vfd) + return; - /* Set the "use the latest version of the format" bounds */ - ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); + fapl = h5_fileaccess(); + h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); + 
new_fapl = H5Pcopy(fapl); + CHECK(new_fapl, FAIL, "H5Pcopy"); - /* Test with old & new format */ - for (new_format = false; new_format <= true; new_format++) { - hid_t my_fapl; + /* Set the "use the latest version of the format" bounds */ + ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); - /* Set the FAPL for the type of format */ - if (new_format) { - MESSAGE(5, ("Testing with new group format\n")); + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); - my_fapl = new_fapl; + /* Test with old & new format */ + for (new_format = false; new_format <= true; new_format++) { + hid_t my_fapl; - if (multi_vfd || split_vfd) { - ret = set_multi_split(new_fapl, FSP_SIZE_DEF, split_vfd); - CHECK(ret, FAIL, "set_multi_split"); - } + /* Set the FAPL for the type of format */ + if (new_format) { + MESSAGE(5, ("Testing with new group format\n")); - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); - CHECK(ret, FAIL, "H5P_set_file_space_strategy"); + my_fapl = new_fapl; - expected_freespace = 4534; - if (split_vfd) - expected_freespace = 427; - if (multi_vfd) - expected_freespace = 248; - expected_fs_del = 0; - } /* end if */ - else { - MESSAGE(5, ("Testing with old group format\n")); - /* Default: non-paged aggregation, non-persistent free-space */ - my_fapl = fapl; - expected_freespace = 2464; - if (split_vfd) - expected_freespace = 264; - if (multi_vfd) - expected_freespace = 0; - expected_fs_del = 4096; - - } /* end else */ - - /* Create an "empty" file */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl); - CHECK(file, FAIL, "H5Fcreate"); + if (multi_vfd || split_vfd) { + ret = set_multi_split(new_fapl, FSP_SIZE_DEF, split_vfd); + CHECK(ret, FAIL, "set_multi_split"); + } - ret = H5Fclose(file); - CHECK_I(ret, "H5Fclose"); + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, false, (hsize_t)1); + CHECK(ret, FAIL, "H5P_set_file_space_strategy"); - /* Get the "empty" file size */ - empty_filesize = h5_get_file_size(filename, H5P_DEFAULT); + expected_freespace = 4534; + if (split_vfd) + expected_freespace = 427; + if (multi_vfd) + expected_freespace = 248; + expected_fs_del = 0; + } /* end if */ + else { + MESSAGE(5, ("Testing with old group format\n")); + /* Default: non-paged aggregation, non-persistent free-space */ + my_fapl = fapl; + expected_freespace = 2464; + if (split_vfd) + expected_freespace = 264; + if (multi_vfd) + expected_freespace = 0; + expected_fs_del = 4096; - /* Re-open the file (with read-write permission) */ - file = H5Fopen(filename, H5F_ACC_RDWR, my_fapl); - CHECK_I(file, "H5Fopen"); + } /* end else */ - /* Check that the free space is 0 */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, 0, "H5Fget_freespace"); + /* Create an "empty" file */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl); + CHECK(file, FAIL, "H5Fcreate"); - /* Create dataspace for datasets */ - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(my_fapl, file, &vol_is_native), FAIL, "h5_using_native_vol"); - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); + ret = H5Fclose(file); + CHECK_I(ret, "H5Fclose"); - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, 
"H5Pset_alloc_time"); + if (!vol_is_native) + continue; - /* Create datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); + /* Get the "empty" file size */ + empty_filesize = h5_get_file_size(filename, H5P_DEFAULT); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ + /* Re-open the file (with read-write permission) */ + file = H5Fopen(filename, H5F_ACC_RDWR, my_fapl); + CHECK_I(file, "H5Fopen"); - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); + /* Check that the free space is 0 */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, 0, "H5Fget_freespace"); - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); + /* Create dataspace for datasets */ + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); - /* Check that there is the right amount of free space in the file */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, expected_freespace, "H5Fget_freespace"); + /* Create a dataset creation property list */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); - /* Delete datasets in file */ - for (k = 9; k >= 0; k--) { - snprintf(name, sizeof(name), "Dataset %u", (unsigned)k); - ret = H5Ldelete(file, name, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end for */ + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); - /* Check that there is the right amount of free space in the file */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); - VERIFY(free_space, expected_fs_del, "H5Fget_freespace"); + /* Create datasets in file */ + for (u = 0; u < 10; u++) { + snprintf(name, sizeof(name), "Dataset %u", u); + dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ - /* Get the file size after modifications*/ - mod_filesize = h5_get_file_size(filename, H5P_DEFAULT); + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); - /* Check that the file reverted to empty size */ - VERIFY(mod_filesize, empty_filesize, "H5Fget_freespace"); + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); - h5_clean_files(FILESPACE_NAME, my_fapl); + /* Check that there is the right amount of free space in the file */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, expected_freespace, "H5Fget_freespace"); + /* Delete datasets in file */ + for (k = 9; k >= 0; k--) { + snprintf(name, sizeof(name), "Dataset %u", (unsigned)k); + ret = H5Ldelete(file, name, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); } /* end for */ - } + + /* Check that there is the right amount of free space in the file */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + VERIFY(free_space, expected_fs_del, "H5Fget_freespace"); + + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + + /* Get the file size after 
modifications*/ + mod_filesize = h5_get_file_size(filename, H5P_DEFAULT); + + /* Check that the file reverted to empty size */ + VERIFY(mod_filesize, empty_filesize, "H5Fget_freespace"); + + h5_clean_files(FILESPACE_NAME, my_fapl); + + } /* end for */ } /* end test_file_freespace() */ @@ -4638,6 +4755,7 @@ test_sects_freespace(const char *env_h5_drvr, bool new_format) unsigned u; /* Local index variable */ H5FD_mem_t type; bool split_vfd = false, multi_vfd = false; + bool vol_is_native; herr_t ret; /* Return value */ /* Output message about test being performed */ @@ -4646,210 +4764,223 @@ test_sects_freespace(const char *env_h5_drvr, bool new_format) split_vfd = !strcmp(env_h5_drvr, "split"); multi_vfd = !strcmp(env_h5_drvr, "multi"); - if (!split_vfd && !multi_vfd) { + if (split_vfd || multi_vfd) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } - fapl = h5_fileaccess(); - h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); + fapl = h5_fileaccess(); + h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename); - /* Create file-creation template */ - fcpl = H5Pcreate(H5P_FILE_CREATE); - CHECK(fcpl, FAIL, "H5Pcreate"); + /* Create file-creation template */ + fcpl = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl, FAIL, "H5Pcreate"); - if (new_format) { - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - CHECK(ret, FAIL, "H5Pset_libver_bounds"); + if (new_format) { + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); - /* Set to paged aggregation and persistent free-space */ - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + /* Set to paged aggregation and persistent free-space */ + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); - /* Set up paged aligned address space for multi/split driver */ - if (multi_vfd || split_vfd) { - ret = set_multi_split(fapl, FSP_SIZE_DEF, split_vfd); - CHECK(ret, FAIL, "set_multi_split"); - } - } - else { - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, true, (hsize_t)1); - CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + /* Set up paged aligned address space for multi/split driver */ + if (multi_vfd || split_vfd) { + ret = set_multi_split(fapl, FSP_SIZE_DEF, split_vfd); + CHECK(ret, FAIL, "set_multi_split"); } + } + else { + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, true, (hsize_t)1); + CHECK(ret, FAIL, "H5Pget_file_space_strategy"); + } - /* Create the file */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); - CHECK(file, FAIL, "H5Fcreate"); + /* Create the file */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + CHECK(file, FAIL, "H5Fcreate"); - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - CHECK(dcpl, FAIL, "H5Pcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, file, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file), FAIL, "H5Fclose"); + CHECK(H5Pclose(fcpl), FAIL, "H5Pclose"); + h5_clean_files(FILESPACE_NAME, fapl); + CHECK(H5Pclose(fapl), FAIL, "H5Pclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } - /* Set the space allocation time to early */ - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - CHECK(ret, FAIL, "H5Pset_alloc_time"); + /* Create a dataset creation property list */ + dcpl 
= H5Pcreate(H5P_DATASET_CREATE); + CHECK(dcpl, FAIL, "H5Pcreate"); - /* Create 1 large dataset */ - dims[0] = 1200; - dspace = H5Screate_simple(1, dims, NULL); - dset = H5Dcreate2(file, "Dataset_large", H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); + /* Set the space allocation time to early */ + ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); + CHECK(ret, FAIL, "H5Pset_alloc_time"); - /* Close dataset */ - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); + /* Create 1 large dataset */ + dims[0] = 1200; + dspace = H5Screate_simple(1, dims, NULL); + dset = H5Dcreate2(file, "Dataset_large", H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); + /* Close dataset */ + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); - /* Create dataspace for datasets */ - dspace = H5Screate(H5S_SCALAR); - CHECK(dspace, FAIL, "H5Screate"); + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); - /* Create datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - CHECK(dset, FAIL, "H5Dcreate2"); + /* Create dataspace for datasets */ + dspace = H5Screate(H5S_SCALAR); + CHECK(dspace, FAIL, "H5Screate"); - ret = H5Dclose(dset); - CHECK(ret, FAIL, "H5Dclose"); - } /* end for */ + /* Create datasets in file */ + for (u = 0; u < 10; u++) { + snprintf(name, sizeof(name), "Dataset %u", u); + dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + CHECK(dset, FAIL, "H5Dcreate2"); - /* Close dataspace */ - ret = H5Sclose(dspace); - CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dset); + CHECK(ret, FAIL, "H5Dclose"); + } /* end for */ - /* Close dataset creation property list */ - ret = H5Pclose(dcpl); - CHECK(ret, FAIL, "H5Pclose"); + /* Close dataspace */ + ret = H5Sclose(dspace); + CHECK(ret, FAIL, "H5Sclose"); - /* Delete odd-numbered datasets in file */ - for (u = 0; u < 10; u++) { - snprintf(name, sizeof(name), "Dataset %u", u); - if (u % 2) { - ret = H5Ldelete(file, name, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Ldelete"); - } /* end if */ - } /* end for */ + /* Close dataset creation property list */ + ret = H5Pclose(dcpl); + CHECK(ret, FAIL, "H5Pclose"); - /* Close file */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); + /* Delete odd-numbered datasets in file */ + for (u = 0; u < 10; u++) { + snprintf(name, sizeof(name), "Dataset %u", u); + if (u % 2) { + ret = H5Ldelete(file, name, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Ldelete"); + } /* end if */ + } /* end for */ - /* Re-open the file with read-only permission */ - file = H5Fopen(filename, H5F_ACC_RDONLY, fapl); - CHECK_I(file, "H5Fopen"); + /* Close file */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); - /* Get the amount of free space in the file */ - free_space = H5Fget_freespace(file); - CHECK(free_space, FAIL, "H5Fget_freespace"); + /* Re-open the file with read-only permission */ + file = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + CHECK_I(file, "H5Fopen"); + + /* Get the amount of free space in the file */ + free_space = H5Fget_freespace(file); + CHECK(free_space, FAIL, "H5Fget_freespace"); + + /* Get the total # of free-space sections in the file */ + nall = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL); + CHECK(nall, FAIL, "H5Fget_free_sections"); + + /* Should 
return failure when nsects is 0 with a nonnull sect_info */ + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info); + VERIFY(nsects, FAIL, "H5Fget_free_sections"); + + /* Retrieve and verify free space info for all the sections */ + memset(all_sect_info, 0, sizeof(all_sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info); + VERIFY(nsects, nall, "H5Fget_free_sections"); + + /* Verify the amount of free-space is correct */ + for (u = 0; u < nall; u++) + total += all_sect_info[u].size; + VERIFY(free_space, total, "H5Fget_free_sections"); + + /* Save the last section's size */ + last_size = all_sect_info[nall - 1].size; + + /* Retrieve and verify free space info for -1 sections */ + memset(sect_info, 0, sizeof(sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall - 1), sect_info); + VERIFY(nsects, nall, "H5Fget_free_sections"); + + /* Verify the amount of free-space is correct */ + total = 0; + for (u = 0; u < (nall - 1); u++) { + VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); + VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); + total += sect_info[u].size; + } + VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections"); + + /* Retrieve and verify free-space info for +1 sections */ + memset(sect_info, 0, sizeof(sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall + 1), sect_info); + VERIFY(nsects, nall, "H5Fget_free_sections"); + + /* Verify amount of free-space is correct */ + total = 0; + for (u = 0; u < nall; u++) { + VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); + VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); + total += sect_info[u].size; + } + VERIFY(sect_info[nall].addr, 0, "H5Fget_free_sections"); + VERIFY(sect_info[nall].size, 0, "H5Fget_free_sections"); + VERIFY(free_space, total, "H5Fget_free_sections"); - /* Get the total # of free-space sections in the file */ - nall = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL); - CHECK(nall, FAIL, "H5Fget_free_sections"); - - /* Should return failure when nsects is 0 with a nonnull sect_info */ - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info); - VERIFY(nsects, FAIL, "H5Fget_free_sections"); - - /* Retrieve and verify free space info for all the sections */ - memset(all_sect_info, 0, sizeof(all_sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info); - VERIFY(nsects, nall, "H5Fget_free_sections"); - - /* Verify the amount of free-space is correct */ - for (u = 0; u < nall; u++) - total += all_sect_info[u].size; - VERIFY(free_space, total, "H5Fget_free_sections"); - - /* Save the last section's size */ - last_size = all_sect_info[nall - 1].size; - - /* Retrieve and verify free space info for -1 sections */ - memset(sect_info, 0, sizeof(sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall - 1), sect_info); - VERIFY(nsects, nall, "H5Fget_free_sections"); - - /* Verify the amount of free-space is correct */ - total = 0; - for (u = 0; u < (nall - 1); u++) { - VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); - VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); - total += sect_info[u].size; - } - VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections"); - - /* Retrieve and verify free-space info for +1 sections */ - 
memset(sect_info, 0, sizeof(sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall + 1), sect_info); - VERIFY(nsects, nall, "H5Fget_free_sections"); - - /* Verify amount of free-space is correct */ - total = 0; - for (u = 0; u < nall; u++) { - VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections"); - VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections"); - total += sect_info[u].size; - } - VERIFY(sect_info[nall].addr, 0, "H5Fget_free_sections"); - VERIFY(sect_info[nall].size, 0, "H5Fget_free_sections"); - VERIFY(free_space, total, "H5Fget_free_sections"); + memset(meta_sect_info, 0, sizeof(meta_sect_info)); + if (multi_vfd) { + hssize_t ntmp; - memset(meta_sect_info, 0, sizeof(meta_sect_info)); - if (multi_vfd) { - hssize_t ntmp; + for (type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; type++) { + if (type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP) + continue; + /* Get the # of free-space sections in the file for metadata */ + ntmp = H5Fget_free_sections(file, type, (size_t)0, NULL); + CHECK(ntmp, FAIL, "H5Fget_free_sections"); - for (type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; type++) { - if (type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP) - continue; - /* Get the # of free-space sections in the file for metadata */ - ntmp = H5Fget_free_sections(file, type, (size_t)0, NULL); - CHECK(ntmp, FAIL, "H5Fget_free_sections"); - - if (ntmp > 0) { - nsects = H5Fget_free_sections(file, type, (size_t)ntmp, &meta_sect_info[nmeta]); - VERIFY(nsects, ntmp, "H5Fget_free_sections"); - nmeta += ntmp; - } + if (ntmp > 0) { + nsects = H5Fget_free_sections(file, type, (size_t)ntmp, &meta_sect_info[nmeta]); + VERIFY(nsects, ntmp, "H5Fget_free_sections"); + nmeta += ntmp; } } - else { - /* Get the # of free-space sections in the file for metadata */ - nmeta = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)0, NULL); - CHECK(nmeta, FAIL, "H5Fget_free_sections"); + } + else { + /* Get the # of free-space sections in the file for metadata */ + nmeta = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)0, NULL); + CHECK(nmeta, FAIL, "H5Fget_free_sections"); - /* Retrieve and verify free-space sections for metadata */ - nsects = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)nmeta, meta_sect_info); - VERIFY(nsects, nmeta, "H5Fget_free_sections"); - } + /* Retrieve and verify free-space sections for metadata */ + nsects = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)nmeta, meta_sect_info); + VERIFY(nsects, nmeta, "H5Fget_free_sections"); + } - /* Get the # of free-space sections in the file for raw data */ - nraw = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)0, NULL); - CHECK(nraw, FAIL, "H5Fget_free_sections"); + /* Get the # of free-space sections in the file for raw data */ + nraw = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)0, NULL); + CHECK(nraw, FAIL, "H5Fget_free_sections"); - /* Retrieve and verify free-space sections for raw data */ - memset(raw_sect_info, 0, sizeof(raw_sect_info)); - nsects = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)nraw, raw_sect_info); - VERIFY(nsects, nraw, "H5Fget_free_sections"); + /* Retrieve and verify free-space sections for raw data */ + memset(raw_sect_info, 0, sizeof(raw_sect_info)); + nsects = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)nraw, raw_sect_info); + VERIFY(nsects, nraw, "H5Fget_free_sections"); - /* Sum all the free-space sections */ - for (u = 0; u < nmeta; u++) - tmp_tot += meta_sect_info[u].size; + /* Sum all the free-space sections */ + for (u = 0; u < 
nmeta; u++) + tmp_tot += meta_sect_info[u].size; - for (u = 0; u < nraw; u++) - tmp_tot += raw_sect_info[u].size; + for (u = 0; u < nraw; u++) + tmp_tot += raw_sect_info[u].size; - /* Verify free-space info */ - VERIFY(nmeta + nraw, nall, "H5Fget_free_sections"); - VERIFY(tmp_tot, total, "H5Fget_free_sections"); + /* Verify free-space info */ + VERIFY(nmeta + nraw, nall, "H5Fget_free_sections"); + VERIFY(tmp_tot, total, "H5Fget_free_sections"); - /* Closing */ - ret = H5Fclose(file); - CHECK(ret, FAIL, "H5Fclose"); - ret = H5Pclose(fcpl); - CHECK(fcpl, FAIL, "H5Pclose"); + /* Closing */ + ret = H5Fclose(file); + CHECK(ret, FAIL, "H5Fclose"); + ret = H5Pclose(fcpl); + CHECK(fcpl, FAIL, "H5Pclose"); - h5_clean_files(FILESPACE_NAME, fapl); - } + h5_clean_files(FILESPACE_NAME, fapl); } /* end test_sects_freespace() */ @@ -4878,11 +5009,19 @@ test_filespace_compatible(void) bool persist; /* Persist free-space or not */ hsize_t threshold; /* Free-space section threshold */ H5F_fspace_strategy_t strategy; /* File space handling strategy */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ /* Output message about test being performed */ MESSAGE(5, ("File space compatibility testing for 1.6 and 1.8 files\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + for (j = 0; j < NELMTS(OLD_FILENAME); j++) { const char *filename = H5_get_srcdir_filename(OLD_FILENAME[j]); /* Corrected test file name */ @@ -5007,11 +5146,19 @@ test_filespace_1_10_0_compatible(void) int rdbuf[24]; /* Buffer for dataset data */ int status; /* Status from copying the existing file */ unsigned i, j; /* Local index variable */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ /* Output message about test being performed */ MESSAGE(5, ("File space compatibility testing for 1.10.0 files\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + for (j = 0; j < NELMTS(OLD_1_10_0_FILENAME); j++) { /* Make a copy of the test file */ status = h5_make_local_copy(OLD_1_10_0_FILENAME[j], FILE5); @@ -5319,11 +5466,19 @@ test_filespace_round_compatible(void) hsize_t threshold; /* Free-space section threshold */ hssize_t free_space; /* Amount of free space in the file */ int status; /* Status from copying the existing file */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ /* Output message about test being performed */ MESSAGE(5, ("File space compatibility testing for files from trunk to 1_8 to trunk\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + for (j = 0; j < NELMTS(FSPACE_FILENAMES); j++) { /* Make a copy of the test file */ status = h5_make_local_copy(FSPACE_FILENAMES[j], FILE5); @@ -5371,7 +5526,8 @@ test_libver_bounds_real(H5F_libver_t libver_create, unsigned oh_vers_create, H5F hid_t file, group; /* Handles */ hid_t fapl; /* File access property list */ H5O_native_info_t ninfo; /* Object info */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ /* * Create a new 
file using the creation properties. @@ -5385,6 +5541,15 @@ test_libver_bounds_real(H5F_libver_t libver_create, unsigned oh_vers_create, H5F file = H5Fcreate("tfile5.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(file, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, file, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Pclose(fapl), FAIL, "H5Pclose"); + CHECK(H5Fclose(file), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* * Make sure the root group has the correct object header version */ @@ -5485,11 +5650,19 @@ test_libver_bounds_open(void) hsize_t dim[1] = {SPACE1_DIM1}; /* Dataset dimensions */ H5F_libver_t low, high; /* File format bounds */ hsize_t chunk_dim[1] = {SPACE1_DIM1}; /* Chunk dimensions */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Opening File in Various Version Bounds\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create a file access property list */ fapl = H5Pcreate(H5P_FILE_ACCESS); CHECK(fapl, FAIL, "H5Pcreate"); @@ -5618,16 +5791,24 @@ test_libver_bounds_copy(void) hid_t fapl = H5I_INVALID_HID; /* File access property list ID */ const char *src_fname; /* Source file name */ herr_t ret; /* Generic return value */ + bool vol_is_native; bool driver_is_default_compatible; /* Output message about the test being performed */ MESSAGE(5, ("Testing H5Ocopy a dataset in a 1.8 library file to a 1.10 library file\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK_I(ret, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -5722,11 +5903,19 @@ test_libver_bounds_low_high(const char *env_h5_drvr) { hid_t fapl = H5I_INVALID_HID; /* File access property list */ H5F_libver_t low, high; /* Low and high bounds */ - herr_t ret; /* The return value */ + bool vol_is_native; + herr_t ret; /* The return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing setting (low, high) format version bounds\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create a file access property list */ fapl = H5Pcreate(H5P_FILE_ACCESS); CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); @@ -7653,11 +7842,19 @@ test_incr_filesize(void) haddr_t stored_eoa; /* The stored EOA value */ hid_t driver_id = H5I_INVALID_HID; /* ID for this VFD */ unsigned long driver_flags = 0; /* VFD feature flags */ - herr_t ret; /* Return value */ + bool vol_is_native; + herr_t ret; /* Return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing H5Fincrement_filesize() and H5Fget_eoa())\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); 
+ if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + fapl = h5_fileaccess(); h5_fixname(FILE_INCR_FILESIZE, fapl, filename, sizeof filename); @@ -7775,6 +7972,7 @@ test_min_dset_ohdr(void) hid_t file_id = H5I_INVALID_HID; hid_t file2_id = H5I_INVALID_HID; bool minimize; + bool vol_is_native; herr_t ret; MESSAGE(5, ("Testing dataset object header minimization\n")); @@ -7788,6 +7986,14 @@ test_min_dset_ohdr(void) file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK_I(file_id, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file_id), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /*********/ /* TESTS */ /*********/ @@ -7909,7 +8115,8 @@ test_deprec(const char *env_h5_drvr) unsigned stab; /* Symbol table entry version # */ unsigned shhdr; /* Shared object header version # */ H5F_info1_t finfo; /* global information about file */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing deprecated routines\n")); @@ -7922,6 +8129,14 @@ test_deprec(const char *env_h5_drvr) file = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Get the file's version information */ ret = H5Fget_info1(file, &finfo); CHECK(ret, FAIL, "H5Fget_info1"); diff --git a/test/th5o.c b/test/th5o.c index 17cfad7f5fd..801091f6b9f 100644 --- a/test/th5o.c +++ b/test/th5o.c @@ -267,7 +267,8 @@ test_h5o_open_by_addr(void) H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ H5G_info_t ginfo; /* Group info struct */ H5T_class_t type_class; /* Class of the datatype */ - herr_t ret; /* Value returned from API calls */ + bool vol_is_native; + herr_t ret; /* Value returned from API calls */ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); @@ -275,6 +276,13 @@ test_h5o_open_by_addr(void) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + return; + } + /* Create a group, dataset, and committed datatype within the file */ /* Create the group */ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -1109,6 +1117,7 @@ test_h5o_comment(void) char check_comment[64]; ssize_t comment_len = 0; ssize_t len; + bool vol_is_native; herr_t ret; /* Value returned from API calls */ int ret_value; @@ -1118,6 +1127,13 @@ test_h5o_comment(void) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + return; + } + /* Create an attribute for the file */ attr_space = H5Screate_simple(1, &attr_dims, NULL); CHECK(attr_space, FAIL, "H5Screate_simple"); @@ -1284,6 +1300,7 @@ test_h5o_comment_by_name(void) 
char check_comment[64]; ssize_t comment_len = 0; ssize_t len; + bool vol_is_native; herr_t ret; /* Value returned from API calls */ int ret_value; @@ -1293,6 +1310,13 @@ test_h5o_comment_by_name(void) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + return; + } + /* Create an attribute for the file */ attr_space = H5Screate_simple(1, &attr_dims, NULL); CHECK(attr_space, FAIL, "H5Screate_simple"); @@ -1556,7 +1580,8 @@ test_h5o_open_by_addr_deprec(void) H5I_type_t id_type; /* Type of IDs returned from H5Oopen */ H5G_info_t ginfo; /* Group info struct */ H5T_class_t type_class; /* Class of the datatype */ - herr_t ret; /* Value returned from API calls */ + bool vol_is_native; + herr_t ret; /* Value returned from API calls */ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename); @@ -1564,6 +1589,13 @@ test_h5o_open_by_addr_deprec(void) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + return; + } + /* Create a group, dataset, and committed datatype within the file */ /* Create the group */ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -1739,7 +1771,8 @@ test_h5o_getinfo_visit(void) H5O_info1_t oinfo1, oinfo2; /* Object info structs */ char attrname[25]; /* Attribute name */ int j; /* Local index variable */ - herr_t ret; /* Value returned from API calls */ + bool vol_is_native; + herr_t ret; /* Value returned from API calls */ /* Output message about test being performed */ MESSAGE(5, ("Testing info returned by H5Oget_info vs H5Ovisit\n")); @@ -1750,6 +1783,14 @@ test_h5o_getinfo_visit(void) fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create "group1" in the file */ gid1 = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(gid1, FAIL, "H5Gcreate2"); diff --git a/test/th5s.c b/test/th5s.c index 734365398f6..f0e4959a54f 100644 --- a/test/th5s.c +++ b/test/th5s.c @@ -125,12 +125,18 @@ test_h5s_basic(void) hsize_t tdims[4]; /* Dimension array to test with */ hsize_t tmax[4]; hssize_t n; /* Number of dataspace elements */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Dataspace Manipulation\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + sid1 = H5Screate_simple(SPACE1_RANK, dims1, max2); CHECK(sid1, FAIL, "H5Screate_simple"); @@ -194,10 +200,13 @@ test_h5s_basic(void) * If this test fails and the H5S_MAX_RANK variable has changed, follow * the instructions in space_overflow.c for regenerating the th5s.h5 file. 
*/ + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Check if VFD used is native file format compatible */ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK_I(ret, "h5_driver_is_default_vfd_compatible"); - if (driver_is_default_compatible) { + if (vol_is_native && driver_is_default_compatible) { const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */ fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); @@ -603,6 +612,11 @@ test_h5s_zero_dim(void) /* Output message about test being performed */ MESSAGE(5, ("Testing Dataspace with zero dimension size\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Initialize the data */ for (i = 0; i < SPACE1_DIM2; i++) for (j = 0; j < SPACE1_DIM3; j++) { @@ -3353,7 +3367,8 @@ test_versionbounds(void) hsize_t dim[1]; /* Dataset dimensions */ H5F_libver_t low, high; /* File format bounds */ H5S_t *spacep = NULL; /* Pointer to internal dataspace */ - herr_t ret = 0; /* Generic return value */ + bool vol_is_native; + herr_t ret = 0; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Version Bounds\n")); @@ -3362,6 +3377,9 @@ test_versionbounds(void) fapl = H5Pcreate(H5P_FILE_ACCESS); CHECK(fapl, FAIL, "H5Pcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataspace */ dim[0] = 10; space = H5Screate_simple(1, dim, NULL); @@ -3389,11 +3407,14 @@ test_versionbounds(void) /* Get the internal dataspace pointer */ dset_space = H5Dget_space(dset); CHECK(dset_space, FAIL, "H5Dget_space"); - spacep = (H5S_t *)H5I_object(dset_space); - CHECK_PTR(spacep, "H5I_object"); - /* Dataspace version should remain as H5O_SDSPACE_VERSION_1 */ - VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound"); + if (vol_is_native) { + spacep = (H5S_t *)H5I_object(dset_space); + CHECK_PTR(spacep, "H5I_object"); + + /* Dataspace version should remain as H5O_SDSPACE_VERSION_1 */ + VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound"); + } /* Close dataspace */ ret = H5Sclose(dset_space); @@ -3426,11 +3447,14 @@ test_versionbounds(void) /* Get the internal dataspace pointer */ dset_space = H5Dget_space(dset); CHECK(dset_space, FAIL, "H5Dget_space"); - spacep = (H5S_t *)H5I_object(dset_space); - CHECK_PTR(spacep, "H5I_object"); - /* Verify the dataspace version */ - VERIFY(spacep->extent.version, H5O_sdspace_ver_bounds[low], "upgraded dataspace version"); + if (vol_is_native) { + spacep = (H5S_t *)H5I_object(dset_space); + CHECK_PTR(spacep, "H5I_object"); + + /* Verify the dataspace version */ + VERIFY(spacep->extent.version, H5O_sdspace_ver_bounds[low], "upgraded dataspace version"); + } /* Close everything */ ret = H5Sclose(dset_space); diff --git a/test/titerate.c b/test/titerate.c index 57b4d0664fd..3c0b82e68f2 100644 --- a/test/titerate.c +++ b/test/titerate.c @@ -90,6 +90,13 @@ liter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR static int count = 0; static int count2 = 0; + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + SKIPPED(); + printf(" API functions for iterate aren't " + "supported with this connector\n"); + return 1; + } + 
strcpy(info->name, name); switch (info->command) { @@ -138,6 +145,14 @@ test_iter_group(hid_t fapl, bool new_format) /* Output message about test being performed */ MESSAGE(5, ("Testing Group Iteration Functionality\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create the test file with the datasets */ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(file, FAIL, "H5Fcreate"); @@ -306,6 +321,7 @@ test_iter_group(hid_t fapl, bool new_format) info.command = RET_TWO; i = 0; idx = 0; + memset(info.name, 0, NAMELEN); while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) { /* Verify return value from iterator gets propagated correctly */ VERIFY(ret, 2, "H5Literate2"); @@ -315,11 +331,13 @@ test_iter_group(hid_t fapl, bool new_format) /* Verify that the index is the correct value */ VERIFY(idx, (hsize_t)i, "H5Literate2"); + if (idx != (hsize_t)i) + break; if (idx > (NDATASETS + 2)) TestErrPrintf("Group iteration function walked too far!\n"); /* Verify that the correct name is retrieved */ - if (strcmp(info.name, lnames[(size_t)(idx - 1)]) != 0) + if (strncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0) TestErrPrintf( "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); @@ -335,6 +353,7 @@ test_iter_group(hid_t fapl, bool new_format) info.command = new_format ? 
RET_CHANGE2 : RET_CHANGE; i = 0; idx = 0; + memset(info.name, 0, NAMELEN); while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) >= 0) { /* Verify return value from iterator gets propagated correctly */ VERIFY(ret, 1, "H5Literate2"); @@ -344,11 +363,13 @@ test_iter_group(hid_t fapl, bool new_format) /* Verify that the index is the correct value */ VERIFY(idx, (hsize_t)(i + 10), "H5Literate2"); + if (idx != (hsize_t)(i + 10)) + break; if (idx > (NDATASETS + 2)) TestErrPrintf("Group iteration function walked too far!\n"); /* Verify that the correct name is retrieved */ - if (strcmp(info.name, lnames[(size_t)(idx - 1)]) != 0) + if (strncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0) TestErrPrintf( "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n", (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]); @@ -424,6 +445,14 @@ test_iter_attr(hid_t fapl, bool new_format) /* Output message about test being performed */ MESSAGE(5, ("Testing Attribute Iteration Functionality\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + + memset(&info, 0, sizeof(iter_info)); + /* Create the test file with the datasets */ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(file, FAIL, "H5Fcreate"); @@ -509,10 +538,14 @@ test_iter_attr(hid_t fapl, bool new_format) /* Don't check name when new format is used */ if (!new_format) { /* Verify that the correct name is retrieved */ - if (strcmp(info.name, anames[(size_t)idx - 1]) != 0) - TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = " - "'%s', anames[%u] = '%s'!\n", - __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); + if (idx > 0) { + if (strcmp(info.name, anames[(size_t)idx - 1]) != 0) + TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = " + "'%s', anames[%u] = '%s'!\n", + __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); + } + else + TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__); } /* end if */ } /* end while */ VERIFY(ret, -1, "H5Aiterate2"); @@ -538,10 +571,14 @@ test_iter_attr(hid_t fapl, bool new_format) /* Don't check name when new format is used */ if (!new_format) { /* Verify that the correct name is retrieved */ - if (strcmp(info.name, anames[(size_t)idx - 1]) != 0) - TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = " - "'%s', anames[%u] = '%s'!\n", - __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); + if (idx > 0) { + if (strcmp(info.name, anames[(size_t)idx - 1]) != 0) + TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = " + "'%s', anames[%u] = '%s'!\n", + __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]); + } + else + TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__); } /* end if */ } /* end while */ VERIFY(ret, -1, "H5Aiterate2"); @@ -584,6 +621,13 @@ liter_cb2(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link H5O_info2_t oinfo; herr_t ret; /* Generic return value */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { + SKIPPED(); + printf(" API functions for iterate and basic links aren't " + "supported with this connector\n"); + return 1; + } + 
if (strcmp(name, test_info->name) != 0) { TestErrPrintf("name = '%s', test_info = '%s'\n", name, test_info->name); return (H5_ITER_ERROR); @@ -638,6 +682,13 @@ test_iter_group_large(hid_t fapl) /* Output message about test being performed */ MESSAGE(5, ("Testing Large Group Iteration Functionality\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(file, FAIL, "H5Fcreate"); @@ -1008,6 +1059,7 @@ test_corrupted_attnamelen(void) searched_err_t err_caught; /* Data to be passed to callback func */ int err_status; /* Status returned by H5Aiterate2 */ herr_t ret; /* Return value */ + bool vol_is_native; bool driver_is_default_compatible; const char *testfile = H5_get_srcdir_filename(CORRUPTED_ATNAMELEN_FILE); /* Corrected test file name */ @@ -1020,11 +1072,18 @@ test_corrupted_attnamelen(void) /* Output message about test being performed */ MESSAGE(5, ("Testing the Handling of Corrupted Attribute's Name Length\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -1078,6 +1137,7 @@ test_links_deprec(hid_t fapl) hid_t gid, gid1; H5G_info_t ginfo; /* Buffer for querying object's info */ hsize_t i; + bool vol_is_native; herr_t ret; /* Generic return value */ /* Output message about test being performed */ @@ -1087,6 +1147,14 @@ test_links_deprec(hid_t fapl) file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(file, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, file, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* create groups */ gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(gid, FAIL, "H5Gcreate2"); diff --git a/test/tmisc.c b/test/tmisc.c index c43f54fe07f..a8103afa16d 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -451,6 +451,9 @@ test_misc2_write_attribute(void) char *string_att1 = strdup("string attribute in file one"); char *string_att2 = strdup("string attribute in file two"); + memset(&data, 0, sizeof(data)); + memset(&data_check, 0, sizeof(data_check)); + type = misc2_create_type(); dataspace = H5Screate(H5S_SCALAR); @@ -1148,6 +1151,7 @@ test_misc7(void) { hid_t fid, did, tid, sid; int enum_value = 1; + bool vol_is_native; herr_t ret; /* Output message about test being performed */ @@ -1159,6 +1163,14 @@ test_misc7(void) fid = H5Fcreate(MISC7_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create the dataspace */ sid = 
H5Screate(H5S_SCALAR); CHECK(sid, FAIL, "H5Screate"); @@ -1255,10 +1267,10 @@ test_misc8(void) hsize_t storage_size; /* Number of bytes of raw data storage used */ int *wdata; /* Data to write */ int *tdata; /* Temporary pointer to data write */ -#ifdef VERIFY_DATA - int *rdata; /* Data to read */ - int *tdata2; /* Temporary pointer to data to read */ -#endif /* VERIFY_DATA */ +#ifndef H5_HAVE_PARALLEL + int *rdata; /* Data to read */ + int *tdata2; /* Temporary pointer to data to read */ +#endif unsigned u, v; /* Local index variables */ int mdc_nelmts; /* Metadata number of elements */ size_t rdcc_nelmts; /* Raw data number of elements */ @@ -1266,25 +1278,12 @@ test_misc8(void) double rdcc_w0; /* Raw data write percentage */ hsize_t start[MISC8_RANK]; /* Hyperslab start */ hsize_t count[MISC8_RANK]; /* Hyperslab block count */ + bool vol_is_native; herr_t ret; /* Output message about test being performed */ MESSAGE(5, ("Testing dataset storage sizes\n")); - /* Allocate space for the data to write & read */ - wdata = (int *)malloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); - CHECK_PTR(wdata, "malloc"); -#ifdef VERIFY_DATA - rdata = (int *)malloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); - CHECK_PTR(rdata, "malloc"); -#endif /* VERIFY_DATA */ - - /* Initialize values */ - tdata = wdata; - for (u = 0; u < MISC8_DIM0; u++) - for (v = 0; v < MISC8_DIM1; v++) - *tdata++ = (int)(((u * MISC8_DIM1) + v) % 13); - /* Create a file access property list */ fapl = H5Pcreate(H5P_FILE_ACCESS); CHECK(fapl, FAIL, "H5Pcreate"); @@ -1308,6 +1307,28 @@ test_misc8(void) ret = H5Pclose(fapl); CHECK(ret, FAIL, "H5Pclose"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5I_INVALID_HID, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + + /* Allocate space for the data to write & read */ + wdata = (int *)malloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); + CHECK_PTR(wdata, "malloc"); +#ifndef H5_HAVE_PARALLEL + rdata = (int *)malloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1); + CHECK_PTR(rdata, "malloc"); +#endif + + /* Initialize values */ + tdata = wdata; + for (u = 0; u < MISC8_DIM0; u++) + for (v = 0; v < MISC8_DIM1; v++) + *tdata++ = (int)(((u * MISC8_DIM1) + v) % 13); + /* Create a simple dataspace */ sid = H5Screate_simple(rank, dims, NULL); CHECK(sid, FAIL, "H5Screate_simple"); @@ -1537,7 +1558,6 @@ test_misc8(void) ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); CHECK(ret, FAIL, "H5Dwrite"); -#ifdef VERIFY_DATA /* Read data */ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); CHECK(ret, FAIL, "H5Dread"); @@ -1550,7 +1570,6 @@ test_misc8(void) if (*tdata != *tdata2) TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, (unsigned)v, (int)*tdata, (int)*tdata2); -#endif /* VERIFY_DATA */ /* Check the storage size after data is written */ storage_size = H5Dget_storage_size(did); @@ -1632,7 +1651,6 @@ test_misc8(void) ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); CHECK(ret, FAIL, "H5Dwrite"); -#ifdef VERIFY_DATA /* Read data */ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); CHECK(ret, FAIL, "H5Dread"); @@ -1645,7 +1663,6 @@ test_misc8(void) if (*tdata != *tdata2) TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, (unsigned)v, (int)*tdata, (int)*tdata2); -#endif /* VERIFY_DATA */ /* Check the 
storage size after data is written */ storage_size = H5Dget_storage_size(did); @@ -1697,7 +1714,6 @@ test_misc8(void) ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata); CHECK(ret, FAIL, "H5Dwrite"); -#ifdef VERIFY_DATA /* Read data */ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); CHECK(ret, FAIL, "H5Dread"); @@ -1710,7 +1726,6 @@ test_misc8(void) if (*tdata != *tdata2) TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u, (unsigned)v, (int)*tdata, (int)*tdata2); -#endif /* VERIFY_DATA */ /* Check the storage size after data is written */ storage_size = H5Dget_storage_size(did); @@ -1744,9 +1759,9 @@ test_misc8(void) /* Free the read & write buffers */ free(wdata); -#ifdef VERIFY_DATA +#ifndef H5_HAVE_PARALLEL free(rdata); -#endif /* VERIFY_DATA */ +#endif } /* end test_misc8() */ /**************************************************************** @@ -1800,25 +1815,35 @@ test_misc10(void) hid_t dcpl; /* Dataset creation property list */ hid_t space, type; /* Old dataset's dataspace & datatype */ const char *testfile = H5_get_srcdir_filename(MISC10_FILE_OLD); /* Corrected test file name */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Output message about test being performed */ MESSAGE(5, ("Testing using old dataset creation property list\n")); - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); - - if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); - return; - } - /* * Open the old file and the dataset and get old settings. */ file = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK(file, FAIL, "H5Fopen"); + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Check if VFD used is native file format compatible */ + CHECK(h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible), FAIL, + "h5_driver_is_default_vfd_compatible"); + if (!driver_is_default_compatible) { + CHECK(H5Fclose(file), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + fcpl = H5Fget_create_plist(file); CHECK(fcpl, FAIL, "H5Fget_create_plist"); @@ -1884,7 +1909,8 @@ test_misc11(void) H5F_fspace_strategy_t strategy; /* File space strategy */ hsize_t threshold; /* Free-space section threshold */ bool persist; /* To persist free-space or not */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing file creation properties retrieved correctly\n")); @@ -1897,12 +1923,17 @@ test_misc11(void) file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file, FAIL, "H5Fcreate"); - /* Get the file's version information */ - ret = H5Fget_info2(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info2"); - VERIFY(finfo.super.version, 0, "H5Fget_info2"); - VERIFY(finfo.free.version, 0, "H5Fget_info2"); - VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native), FAIL, "h5_using_native_vol"); + + if (vol_is_native) { + /* Get the file's version information */ + ret = H5Fget_info2(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info2"); + 
VERIFY(finfo.super.version, 0, "H5Fget_info2"); + VERIFY(finfo.free.version, 0, "H5Fget_info2"); + VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); + } /* Close file */ ret = H5Fclose(file); @@ -1955,16 +1986,21 @@ test_misc11(void) file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT); CHECK(file, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Close FCPL */ ret = H5Pclose(fcpl); CHECK(ret, FAIL, "H5Pclose"); - /* Get the file's version information */ - ret = H5Fget_info2(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info2"); - VERIFY(finfo.super.version, 2, "H5Fget_info2"); - VERIFY(finfo.free.version, 0, "H5Fget_info2"); - VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); + if (vol_is_native) { + /* Get the file's version information */ + ret = H5Fget_info2(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info2"); + VERIFY(finfo.super.version, 2, "H5Fget_info2"); + VERIFY(finfo.free.version, 0, "H5Fget_info2"); + VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); + } /* Close file */ ret = H5Fclose(file); @@ -1974,16 +2010,21 @@ test_misc11(void) file = H5Fopen(MISC11_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK(file, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Get the file's creation property list */ fcpl = H5Fget_create_plist(file); CHECK(fcpl, FAIL, "H5Fget_create_plist"); - /* Get the file's version information */ - ret = H5Fget_info2(file, &finfo); - CHECK(ret, FAIL, "H5Fget_info2"); - VERIFY(finfo.super.version, 2, "H5Fget_info2"); - VERIFY(finfo.free.version, 0, "H5Fget_info2"); - VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); + if (vol_is_native) { + /* Get the file's version information */ + ret = H5Fget_info2(file, &finfo); + CHECK(ret, FAIL, "H5Fget_info2"); + VERIFY(finfo.super.version, 2, "H5Fget_info2"); + VERIFY(finfo.free.version, 0, "H5Fget_info2"); + VERIFY(finfo.sohm.version, 0, "H5Fget_info2"); + } /* Retrieve all the property values & check them */ ret = H5Pget_userblock(fcpl, &userblock); @@ -2500,6 +2541,12 @@ test_misc13(void) unsigned *data = NULL; /* Data to write to dataset */ hsize_t userblock_size; /* Correct size of userblock */ bool check_for_new_dataset; /* Whether to check for the post-userblock-creation dataset */ + bool vol_is_native; + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) + return; /* Create a data buffer for the datasets */ data = (unsigned *)calloc(MISC13_DIM1, sizeof(unsigned)); @@ -2830,6 +2877,9 @@ test_misc16(void) hsize_t dims[] = {MISC16_SPACE_DIM}; int i; + memset(wdata, 0, sizeof(wdata)); + memset(rdata, 0, sizeof(rdata)); + /* Initialize the data */ /* (Note that these are supposed to stress the code, so are a little weird) */ memcpy(wdata[0], "1234567", MISC16_STR_SIZE); @@ -2918,6 +2968,9 @@ test_misc17(void) hsize_t dims[] = {MISC17_SPACE_DIM1, MISC17_SPACE_DIM2}; int i; + memset(wdata, 0, sizeof(wdata)); + memset(rdata, 0, sizeof(rdata)); + /* Initialize the data */ /* (Note that these are supposed to stress the code, so are a little weird) */ memcpy(wdata[0], "1234567", MISC17_SPACE_DIM2); @@ -3003,12 +3056,16 @@ test_misc18(void) H5O_native_info_t ninfo; /* Native file format information about object */ char attr_name[32]; /* Attribute name buffer */ unsigned u; /* Local index variable */ - 
herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Create the file */ fid = H5Fcreate(MISC18_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataspace for attributes */ sid = H5Screate(H5S_SCALAR); CHECK(sid, FAIL, "H5Screate"); @@ -3021,22 +3078,25 @@ test_misc18(void) ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); CHECK(ret, FAIL, "H5Oget_info_by_name"); VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name"); + + if (vol_is_native) { #ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); + ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); + ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); + } /* Create second dataset */ did2 = H5Dcreate2(fid, MISC18_DSET2_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -3046,22 +3106,25 @@ test_misc18(void) ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); CHECK(ret, FAIL, "H5Oget_info_by_name"); VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name"); + + if (vol_is_native) { #ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); + ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, 
"H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name"); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); + ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name"); + } /* Loop creating attributes on each dataset, flushing them to the file each time */ for (u = 0; u < 10; u++) { @@ -3091,43 +3154,49 @@ test_misc18(void) ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); CHECK(ret, FAIL, "H5Oget_info_by_name"); VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name"); + + if (vol_is_native) { #ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); + ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); + ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); + } /* Get object information for dataset #2 now */ ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT); CHECK(ret, FAIL, 
"H5Oget_info_by_name"); VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name"); + + if (vol_is_native) { #ifndef H5_NO_DEPRECATED_SYMBOLS - ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, - H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); - VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); - VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); + ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS, + H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name"); + VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name"); + VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name"); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oget_mative_info_by_name"); - VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); - VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); + ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oget_mative_info_by_name"); + VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name"); + VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name"); + } /* Close second dataset */ ret = H5Dclose(did2); @@ -3702,17 +3771,30 @@ test_misc20(void) unsigned version; /* Version of storage layout info */ hsize_t contig_size; /* Size of contiguous storage size from layout into */ const char *testfile = H5_get_srcdir_filename(MISC20_FILE_OLD); /* Corrected test file name */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing large dimension truncation fix\n")); - ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); - CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + /* Create the file */ + fid = H5Fcreate(MISC20_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Check if VFD used is native file format compatible */ + CHECK(h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible), FAIL, + "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + CHECK(H5Fclose(fid), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -3730,10 +3812,6 @@ test_misc20(void) * been truncated. 
*/ - /* Create the file */ - fid = H5Fcreate(MISC20_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid, FAIL, "H5Fcreate"); - /* Create dataspace with _really_ big dimensions */ sid = H5Screate_simple(rank, big_dims, NULL); CHECK(sid, FAIL, "H5Screate_simple"); @@ -4400,22 +4478,27 @@ test_misc23(void) * test H5Lcreate_external() **********************************************************************/ - status = H5Lcreate_external("fake_filename", "fake_path", file_id, "/A/B20/grp", create_id, access_id); - CHECK(status, FAIL, "H5Lcreate_external"); + if (vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) { + status = + H5Lcreate_external("fake_filename", "fake_path", file_id, "/A/B20/grp", create_id, access_id); + CHECK(status, FAIL, "H5Lcreate_external"); - tri_status = H5Lexists(file_id, "/A/B20/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); + tri_status = H5Lexists(file_id, "/A/B20/grp", access_id); + VERIFY(tri_status, true, "H5Lexists"); + } /********************************************************************** * test H5Lcreate_ud() **********************************************************************/ - status = - H5Lcreate_ud(file_id, "/A/B21/grp", H5L_TYPE_EXTERNAL, "file\0obj", (size_t)9, create_id, access_id); - CHECK(status, FAIL, "H5Lcreate_ud"); + if (vol_cap_flags_g & H5VL_CAP_FLAG_UD_LINKS) { + status = H5Lcreate_ud(file_id, "/A/B21/grp", H5L_TYPE_EXTERNAL, "file\0obj", (size_t)9, create_id, + access_id); + CHECK(status, FAIL, "H5Lcreate_ud"); - tri_status = H5Lexists(file_id, "/A/B21/grp", access_id); - VERIFY(tri_status, true, "H5Lexists"); + tri_status = H5Lexists(file_id, "/A/B21/grp", access_id); + VERIFY(tri_status, true, "H5Lexists"); + } /********************************************************************** * close @@ -4998,17 +5081,25 @@ test_misc25b(void) hid_t fid; /* File ID */ hid_t gid; /* Group ID */ const char *testfile = H5_get_srcdir_filename(MISC25B_FILE); /* Corrected test file name */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Exercise null object header message bug\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -5259,17 +5350,25 @@ test_misc27(void) hid_t fid; /* File ID */ hid_t gid; /* Group ID */ const char *testfile = H5_get_srcdir_filename(MISC27_FILE); /* Corrected test file name */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Corrupt object header handling\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -5324,6 +5423,7 @@ test_misc28(void) 
int nused; char buf[MISC28_SIZE]; int i; + bool vol_is_native; herr_t ret; /* Generic return value */ /* Output message about test being performed */ @@ -5350,17 +5450,22 @@ test_misc28(void) fid = H5Fcreate(MISC28_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(fapl, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + sidf = H5Screate_simple(2, dims, NULL); CHECK(sidf, FAIL, "H5Screate_simple"); did = H5Dcreate2(fid, "dataset", H5T_NATIVE_CHAR, sidf, H5P_DEFAULT, dcpl, H5P_DEFAULT); CHECK(did, FAIL, "H5Dcreate2"); - /* Verify that the chunk cache is empty */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); - VERIFY(nused, 0, "H5D__current_cache_size_test"); + if (vol_is_native) { + /* Verify that the chunk cache is empty */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); + VERIFY(nused, 0, "H5D__current_cache_size_test"); + } /* Initialize write buffer */ for (i = 0; i < MISC28_SIZE; i++) @@ -5377,11 +5482,13 @@ test_misc28(void) ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); CHECK(ret, FAIL, "H5Dwrite"); - /* Verify that all 10 chunks written have been cached */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + if (vol_is_native) { + /* Verify that all 10 chunks written have been cached */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + } /* Initialize write buffer */ for (i = 0; i < MISC28_SIZE; i++) @@ -5396,11 +5503,13 @@ test_misc28(void) ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf); CHECK(ret, FAIL, "H5Dwrite"); - /* Verify that the size of the cache remains at 10 */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + if (vol_is_native) { + /* Verify that the size of the cache remains at 10 */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + } /* Close dataset */ ret = H5Dclose(did); @@ -5410,11 +5519,13 @@ test_misc28(void) did = H5Dopen2(fid, "dataset", H5P_DEFAULT); CHECK(did, FAIL, "H5Dopen2"); - /* Verify that the chunk cache is empty */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); - VERIFY(nused, 0, "H5D__current_cache_size_test"); + if (vol_is_native) { + /* Verify that the chunk cache is empty */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + 
VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test"); + VERIFY(nused, 0, "H5D__current_cache_size_test"); + } /* Select hyperslabe for reading */ start[1] = 0; @@ -5429,11 +5540,13 @@ test_misc28(void) for (i = 0; i < MISC28_SIZE; i++) VERIFY(buf[i], i, "H5Dread"); - /* Verify that all 10 chunks read have been cached */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + if (vol_is_native) { + /* Verify that all 10 chunks read have been cached */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + } /* Select new hyperslab */ start[1] = 1; @@ -5448,11 +5561,13 @@ test_misc28(void) for (i = 0; i < MISC28_SIZE; i++) VERIFY(buf[i], MISC28_SIZE - 1 - i, "H5Dread"); - /* Verify that the size of the cache remains at 10 */ - ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); - CHECK(ret, FAIL, "H5D__current_cache_size_test"); - VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); - VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + if (vol_is_native) { + /* Verify that the size of the cache remains at 10 */ + ret = H5D__current_cache_size_test(did, &nbytes_used, &nused); + CHECK(ret, FAIL, "H5D__current_cache_size_test"); + VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test"); + VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test"); + } /* Close dataset */ ret = H5Dclose(did); @@ -5482,6 +5597,7 @@ test_misc28(void) static void test_misc29(void) { + bool vol_is_native; bool driver_is_default_compatible; hid_t fid; /* File ID */ herr_t ret; /* Generic return value */ @@ -5489,11 +5605,18 @@ test_misc29(void) /* Output message about test being performed */ MESSAGE(5, ("Speculative metadata reads\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -5541,10 +5664,18 @@ test_misc30(void) { hsize_t file_size[] = {0, 0}; /* Sizes of file created */ unsigned get_info; /* Whether to perform the get info call */ + bool vol_is_native; /* Output message about test being performed */ MESSAGE(5, ("Local heap dropping free block info\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + for (get_info = false; get_info <= true; get_info++) { hid_t fid; /* File ID */ hid_t gid; /* Group ID */ @@ -5610,8 +5741,9 @@ test_misc31(void) hid_t attr_id; /* Attribute id */ hid_t group_id; /* Group id */ hid_t dtype_id; /* Datatype id */ - herr_t ret; /* Generic return value */ -#endif /* H5_NO_DEPRECATED_SYMBOLS */ + bool vol_is_native; + herr_t ret; /* Generic return value */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ /* Output message about 
test being performed */ MESSAGE(5, ("Deprecated routines initialize after H5close()\n")); @@ -5620,6 +5752,14 @@ test_misc31(void) file_id = H5Fcreate(MISC31_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file_id, FAIL, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(file_id), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Test dataset package */ space_id = H5Screate(H5S_SCALAR); CHECK(space_id, FAIL, "H5Screate"); @@ -5774,17 +5914,25 @@ test_misc33(void) hid_t fid = H5I_INVALID_HID; /* File ID */ const char *testfile = H5_get_srcdir_filename(MISC33_FILE); /* Corrected test file name */ H5O_info2_t oinfo; /* Structure for object metadata information */ + bool vol_is_native; bool driver_is_default_compatible; herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing that bad offset into the heap returns error")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } @@ -6072,6 +6220,7 @@ static void test_misc37(void) { const char *testfile = H5_get_srcdir_filename(CVE_2020_10812_FILENAME); + bool vol_is_native; bool driver_is_default_compatible; hid_t fid; herr_t ret; @@ -6079,11 +6228,18 @@ test_misc37(void) /* Output message about test being performed */ MESSAGE(5, ("Fix for HDFFV-11052/CVE-2020-10812")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); if (!driver_is_default_compatible) { - printf("-- SKIPPED --\n"); + MESSAGE(5, (" -- SKIPPED --\n")); return; } diff --git a/test/trefer.c b/test/trefer.c index f17e114fd67..b8f91a03363 100644 --- a/test/trefer.c +++ b/test/trefer.c @@ -104,7 +104,8 @@ test_reference_params(void) const char *write_comment = "Foo!"; /* Comments for group */ hid_t ret_id; /* Generic hid_t return value */ ssize_t name_size; /* Size of reference name */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Reference Parameters\n")); @@ -122,6 +123,9 @@ test_reference_params(void) fid1 = H5Fcreate(FILE_REF_PARAM, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataspace for datasets */ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); @@ -138,9 +142,11 @@ test_reference_params(void) group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(group, H5I_INVALID_HID, "H5Gcreate2"); - /* Set group's comment */ - ret = H5Oset_comment(group, 
write_comment); - CHECK(ret, FAIL, "H5Oset_comment"); + if (vol_is_native) { + /* Set group's comment */ + ret = H5Oset_comment(group, write_comment); + CHECK(ret, FAIL, "H5Oset_comment"); + } /* Create a dataset (inside Group1) */ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -2026,6 +2032,12 @@ test_reference_obj_deleted(void) MESSAGE(5, ("Testing References to Deleted Objects\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_REF_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Create file */ fid1 = H5Fcreate(FILE_REF_OBJ_DEL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); @@ -2848,22 +2860,31 @@ test_reference_compat_conv(void) hdset_reg_ref_t *wbuf_reg = NULL; /* Buffer to write to disk */ H5R_ref_t *rbuf_reg = NULL; /* Buffer read from disk */ H5O_type_t obj_type; /* Object type */ - herr_t ret; /* Generic return value */ - unsigned int i; /* Counter */ + bool vol_is_native; + herr_t ret; /* Generic return value */ + unsigned int i; /* Counter */ /* Output message about test being performed */ MESSAGE(5, ("Testing Deprecated Object Reference Functions\n")); + /* Create file */ + fid1 = H5Fcreate(FILE_REF_COMPAT, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + CHECK(H5Fclose(fid1), FAIL, "H5Fclose"); + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Allocate write & read buffers */ wbuf_obj = (hobj_ref_t *)calloc(sizeof(hobj_ref_t), SPACE1_DIM1); rbuf_obj = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); wbuf_reg = calloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1); rbuf_reg = calloc(sizeof(H5R_ref_t), SPACE1_DIM1); - /* Create file */ - fid1 = H5Fcreate(FILE_REF_COMPAT, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); - /* Create dataspace for datasets */ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); @@ -3158,8 +3179,9 @@ test_reference_perf(void) hdset_reg_ref_t *wbuf_reg_deprec, /* deprecated references*/ *rbuf_reg_deprec; /* deprecated references*/ unsigned *ibuf, *obuf; - unsigned i, j; /* Counters */ - H5O_type_t obj_type; /* Object type */ + unsigned i, j; /* Counters */ + H5O_type_t obj_type; /* Object type */ + bool vol_is_native; herr_t ret; /* Generic return value */ double t1, t2, t; /* Timers */ @@ -3186,6 +3208,9 @@ test_reference_perf(void) fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid1, H5I_INVALID_HID, "H5Fcreate"); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid1, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create dataspace for datasets */ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL); CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple"); @@ -3284,40 +3309,42 @@ test_reference_perf(void) ret = H5Dclose(dataset); CHECK(ret, FAIL, "H5Dclose"); - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + if (vol_is_native) { + /* Create a dataset */ + dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, 
H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + t = 0; + for (i = 0; i < MAX_ITER_CREATE; i++) { + t1 = H5_get_time(); + ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); + CHECK(ret, FAIL, "H5Rcreate"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + printf("--- Deprecated object reference create time: %lfs\n", t / MAX_ITER_CREATE); - t = 0; - for (i = 0; i < MAX_ITER_CREATE; i++) { - t1 = H5_get_time(); + /* Create reference to dataset */ ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); CHECK(ret, FAIL, "H5Rcreate"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated object reference create time: %lfs\n", t / MAX_ITER_CREATE); - /* Create reference to dataset */ - ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); + t = 0; + for (i = 0; i < MAX_ITER_WRITE; i++) { + t1 = H5_get_time(); + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_deprec); + CHECK(ret, FAIL, "H5Dwrite"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + printf("--- Deprecated object reference write time: %lfs\n", t / MAX_ITER_WRITE); - t = 0; - for (i = 0; i < MAX_ITER_WRITE; i++) { - t1 = H5_get_time(); - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_deprec); - CHECK(ret, FAIL, "H5Dwrite"); - t2 = H5_get_time(); - t += t2 - t1; + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); } - if (VERBOSE_MED) - printf("--- Deprecated object reference write time: %lfs\n", t / MAX_ITER_WRITE); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); /* Create a dataset */ dataset = H5Dcreate2(fid1, "Dataset5", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -3357,37 +3384,40 @@ test_reference_perf(void) ret = H5Dclose(dataset); CHECK(ret, FAIL, "H5Dclose"); - /* Create a dataset */ - dataset = H5Dcreate2(fid1, "Dataset6", H5T_STD_REF_DSETREG, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); - - t = 0; - for (i = 0; i < MAX_ITER_CREATE; i++) { - t1 = H5_get_time(); - /* Store first dataset region */ - ret = H5Rcreate(&wbuf_reg_deprec[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION1, sid1); - CHECK(ret, FAIL, "H5Rcreate"); - t2 = H5_get_time(); - t += t2 - t1; - } - if (VERBOSE_MED) - printf("--- Deprecated region reference create time: %lfs\n", t / MAX_ITER_CREATE); + if (vol_is_native) { + /* Create a dataset */ + dataset = + H5Dcreate2(fid1, "Dataset6", H5T_STD_REF_DSETREG, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); + + t = 0; + for (i = 0; i < MAX_ITER_CREATE; i++) { + t1 = H5_get_time(); + /* Store first dataset region */ + ret = H5Rcreate(&wbuf_reg_deprec[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION1, sid1); + CHECK(ret, FAIL, "H5Rcreate"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + printf("--- Deprecated region reference create time: %lfs\n", t / MAX_ITER_CREATE); + + t = 0; + for (i = 0; i < MAX_ITER_WRITE; i++) { + t1 = H5_get_time(); + /* Write selection to disk */ + ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg_deprec); + CHECK(ret, FAIL, "H5Dwrite"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + printf("--- 
Deprecated region reference write time: %lfs\n", t / MAX_ITER_WRITE); - t = 0; - for (i = 0; i < MAX_ITER_WRITE; i++) { - t1 = H5_get_time(); - /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg_deprec); - CHECK(ret, FAIL, "H5Dwrite"); - t2 = H5_get_time(); - t += t2 - t1; + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); } - if (VERBOSE_MED) - printf("--- Deprecated region reference write time: %lfs\n", t / MAX_ITER_WRITE); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); /* Close disk dataspace */ ret = H5Sclose(sid1); @@ -3449,25 +3479,27 @@ test_reference_perf(void) ret = H5Dclose(dataset); CHECK(ret, FAIL, "H5Dclose"); - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + if (vol_is_native) { + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + t = 0; + for (i = 0; i < MAX_ITER_READ; i++) { + t1 = H5_get_time(); + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_deprec); + CHECK(ret, FAIL, "H5Dread"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + printf("--- Deprecated object reference read time: %lfs\n", t / MAX_ITER_READ); - t = 0; - for (i = 0; i < MAX_ITER_READ; i++) { - t1 = H5_get_time(); - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_deprec); - CHECK(ret, FAIL, "H5Dread"); - t2 = H5_get_time(); - t += t2 - t1; + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); } - if (VERBOSE_MED) - printf("--- Deprecated object reference read time: %lfs\n", t / MAX_ITER_READ); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); /* Open the dataset */ dataset = H5Dopen2(fid1, "/Dataset5", H5P_DEFAULT); @@ -3495,25 +3527,27 @@ test_reference_perf(void) ret = H5Dclose(dataset); CHECK(ret, FAIL, "H5Dclose"); - /* Open the dataset */ - dataset = H5Dopen2(fid1, "/Dataset6", H5P_DEFAULT); - CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + if (vol_is_native) { + /* Open the dataset */ + dataset = H5Dopen2(fid1, "/Dataset6", H5P_DEFAULT); + CHECK(dataset, H5I_INVALID_HID, "H5Dopen2"); + + t = 0; + for (i = 0; i < MAX_ITER_READ; i++) { + t1 = H5_get_time(); + /* Read selection from disk */ + ret = H5Dread(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg_deprec); + CHECK(ret, FAIL, "H5Dread"); + t2 = H5_get_time(); + t += t2 - t1; + } + if (VERBOSE_MED) + printf("--- Deprecated region reference read time: %lfs\n", t / MAX_ITER_READ); - t = 0; - for (i = 0; i < MAX_ITER_READ; i++) { - t1 = H5_get_time(); - /* Read selection from disk */ - ret = H5Dread(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg_deprec); - CHECK(ret, FAIL, "H5Dread"); - t2 = H5_get_time(); - t += t2 - t1; + /* Close Dataset */ + ret = H5Dclose(dataset); + CHECK(ret, FAIL, "H5Dclose"); } - if (VERBOSE_MED) - printf("--- Deprecated region reference read time: %lfs\n", t / MAX_ITER_READ); - - /* Close Dataset */ - ret = H5Dclose(dataset); - CHECK(ret, FAIL, "H5Dclose"); /* Close dataset access property list */ ret = H5Pclose(dapl_id); diff --git a/test/trefer_deprec.c b/test/trefer_deprec.c index 5bb1f133c9a..bbcf630fbc3 100644 --- a/test/trefer_deprec.c +++ b/test/trefer_deprec.c @@ -1815,10 +1815,18 @@ void 
test_reference_deprec(void) { H5F_libver_t low, high; /* Low and high bounds */ + bool vol_is_native; /* Output message about test being performed */ MESSAGE(5, ("Testing Deprecated References\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + test_reference_params(); /* Test for correct parameter checking */ test_reference_obj(); /* Test basic H5R object reference code */ diff --git a/test/tsohm.c b/test/tsohm.c index b3f48d323d1..542fd688b81 100644 --- a/test/tsohm.c +++ b/test/tsohm.c @@ -3711,10 +3711,18 @@ void test_sohm(void) { const char *env_h5_drvr; + bool vol_is_native; bool default_driver; MESSAGE(5, ("Testing Shared Object Header Messages\n")); + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native), FAIL, "h5_using_native_vol"); + if (!vol_is_native) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + /* Get the VFD to use */ env_h5_drvr = getenv(HDF5_DRIVER); if (env_h5_drvr == NULL) diff --git a/test/tunicode.c b/test/tunicode.c index 705bdfd10cf..a65b469a8a8 100644 --- a/test/tunicode.c +++ b/test/tunicode.c @@ -382,25 +382,31 @@ test_objnames(hid_t fid, const char *string) hsize_t dims = 1; hobj_ref_t obj_ref; ssize_t size; + bool vol_is_native; herr_t ret; + /* Check if native VOL is being used */ + CHECK(h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native), FAIL, "h5_using_native_vol"); + /* Create a group with a UTF-8 name */ grp_id = H5Gcreate2(fid, string, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(grp_id, FAIL, "H5Gcreate2"); - /* Set a comment on the group to test that we can access the group - * Also test that UTF-8 comments can be read. - */ - ret = H5Oset_comment_by_name(fid, string, string, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Oset_comment_by_name"); - size = H5Oget_comment_by_name(fid, string, read_buf, (size_t)MAX_STRING_LENGTH, H5P_DEFAULT); - CHECK(size, FAIL, "H5Oget_comment_by_name"); + if (vol_is_native) { + /* Set a comment on the group to test that we can access the group + * Also test that UTF-8 comments can be read. 
+ */ + ret = H5Oset_comment_by_name(fid, string, string, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Oset_comment_by_name"); + size = H5Oget_comment_by_name(fid, string, read_buf, (size_t)MAX_STRING_LENGTH, H5P_DEFAULT); + CHECK(size, FAIL, "H5Oget_comment_by_name"); + + VERIFY(strcmp(string, read_buf), 0, "strcmp"); + } ret = H5Gclose(grp_id); CHECK(ret, FAIL, "H5Gclose"); - VERIFY(strcmp(string, read_buf), 0, "strcmp"); - /* Create a new dataset with a UTF-8 name */ grp1_id = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(grp1_id, FAIL, "H5Gcreate2"); @@ -441,34 +447,35 @@ test_objnames(hid_t fid, const char *string) /* Don't close the group -- use it to test that object references * can refer to objects named in UTF-8 */ - - space_id = H5Screate_simple(RANK, &dims, NULL); - CHECK(space_id, FAIL, "H5Screate_simple"); - dset_id = - H5Dcreate2(grp2_id, DSET3_NAME, H5T_STD_REF_OBJ, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK(ret, FAIL, "H5Dcreate2"); - - /* Create reference to named datatype */ - ret = H5Rcreate(&obj_ref, grp2_id, string, H5R_OBJECT, (hid_t)H5I_INVALID_HID); - CHECK(ret, FAIL, "H5Rcreate"); - /* Write selection and read it back*/ - ret = H5Dwrite(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); - CHECK(ret, FAIL, "H5Dwrite"); - ret = H5Dread(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); - CHECK(ret, FAIL, "H5Dread"); - - /* Ensure that we can open named datatype using object reference */ - type_id = H5Rdereference2(dset_id, H5P_DEFAULT, H5R_OBJECT, &obj_ref); - CHECK(type_id, FAIL, "H5Rdereference2"); - ret = H5Tcommitted(type_id); - VERIFY(ret, 1, "H5Tcommitted"); - - ret = H5Tclose(type_id); - CHECK(type_id, FAIL, "H5Tclose"); - ret = H5Dclose(dset_id); - CHECK(ret, FAIL, "H5Dclose"); - ret = H5Sclose(space_id); - CHECK(ret, FAIL, "H5Sclose"); + if (vol_is_native) { + space_id = H5Screate_simple(RANK, &dims, NULL); + CHECK(space_id, FAIL, "H5Screate_simple"); + dset_id = + H5Dcreate2(grp2_id, DSET3_NAME, H5T_STD_REF_OBJ, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Dcreate2"); + + /* Create reference to named datatype */ + ret = H5Rcreate(&obj_ref, grp2_id, string, H5R_OBJECT, (hid_t)-1); + CHECK(ret, FAIL, "H5Rcreate"); + /* Write selection and read it back*/ + ret = H5Dwrite(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); + CHECK(ret, FAIL, "H5Dwrite"); + ret = H5Dread(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref); + CHECK(ret, FAIL, "H5Dread"); + + /* Ensure that we can open named datatype using object reference */ + type_id = H5Rdereference2(dset_id, H5P_DEFAULT, H5R_OBJECT, &obj_ref); + CHECK(type_id, FAIL, "H5Rdereference2"); + ret = H5Tcommitted(type_id); + VERIFY(ret, 1, "H5Tcommitted"); + + ret = H5Tclose(type_id); + CHECK(type_id, FAIL, "H5Tclose"); + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + } ret = H5Gclose(grp2_id); CHECK(ret, FAIL, "H5Gclose"); diff --git a/test/tvlstr.c b/test/tvlstr.c index a3ea04141b6..9f41a0570b3 100644 --- a/test/tvlstr.c +++ b/test/tvlstr.c @@ -882,6 +882,11 @@ test_write_same_element(void) MESSAGE(5, ("Testing writing to same element of VL string dataset twice\n")); + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + MESSAGE(5, (" -- SKIPPED --\n")); + return; + } + file1 = H5Fcreate(DATAFILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file1, FAIL, 
"H5Fcreate"); diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt index 869a925d07c..818bee665d5 100644 --- a/testpar/API/CMakeLists.txt +++ b/testpar/API/CMakeLists.txt @@ -13,7 +13,7 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_TEST_PAR_API C) #------------------------------------------------------------------------------ -# Define for API tests +# Variables, definitions, etc. for API tests #------------------------------------------------------------------------------ set (HDF5_API_TESTS @@ -34,7 +34,9 @@ if (HDF5_TEST_API_ENABLE_ASYNC) ) endif () -# Ported HDF5 tests +# Extra HDF5 tests to run. Each entry in the list +# must be a CMake target name for a test executable +# that was added elsewhere in the project set (HDF5_API_PAR_TESTS_EXTRA t_bigio t_pshutdown @@ -43,9 +45,12 @@ set (HDF5_API_PAR_TESTS_EXTRA ) # List of files generated by the HDF5 API tests which -# should be cleaned up in case the test failed to remove -# them +# we should attempt to clean up in case the tests failed +# to remove them +# TODO: Run h5delete tool with appropriate env. vars for +# connectors to remove these files set (HDF5_API_PAR_TESTS_FILES + # TODO H5_api_test_parallel.h5 H5_api_async_test_parallel.h5 H5_api_async_test_parallel_0.h5 @@ -96,22 +101,23 @@ target_compile_definitions ( PRIVATE "$<$:${HDF5_DEVELOPER_DEFS}>" ) -if (NOT BUILD_SHARED_LIBS) - TARGET_C_PROPERTIES (h5_api_test_parallel STATIC) +# Always prefer linking the shared HDF5 library by default +if (BUILD_SHARED_LIBS) + TARGET_C_PROPERTIES (h5_api_test_parallel SHARED) target_link_libraries ( h5_api_test_parallel PRIVATE - ${HDF5_TEST_LIB_TARGET} - ${HDF5_LIB_TARGET} + ${HDF5_TEST_LIBSH_TARGET} + ${HDF5_LIBSH_TARGET} "$<$:MPI::MPI_C>" ) else () - TARGET_C_PROPERTIES (h5_api_test_parallel SHARED) + TARGET_C_PROPERTIES (h5_api_test_parallel STATIC) target_link_libraries ( h5_api_test_parallel PRIVATE - ${HDF5_TEST_LIBSH_TARGET} - ${HDF5_LIBSH_TARGET} + ${HDF5_TEST_LIB_TARGET} + ${HDF5_LIB_TARGET} "$<$:MPI::MPI_C>" ) endif () @@ -125,120 +131,15 @@ if (HDF5_ENABLE_FORMATTERS) clang_format (HDF5_TEST_h5_api_test_parallel_FORMAT h5_api_test_parallel) endif () -if (HDF5_TEST_API_INSTALL) - install ( - TARGETS - h5_api_test_parallel - EXPORT - ${HDF5_EXPORTED_TARGETS} - DESTINATION - ${HDF5_INSTALL_BIN_DIR} - PERMISSIONS - OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE - COMPONENT - tests - ) -endif () - -#----------------------------------------------------------------------------- -# Build the ported HDF5 test executables -#----------------------------------------------------------------------------- -foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA}) - unset (HDF5_API_PAR_TEST_EXTRA_SRCS) - - set (HDF5_API_PAR_TEST_EXTRA_SRCS - ${HDF5_API_PAR_TEST_EXTRA_SRCS} - ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c - ) - - if (${api_test_extra} STREQUAL "testphdf5") - set (HDF5_API_PAR_TEST_EXTRA_SRCS - ${HDF5_API_PAR_TEST_EXTRA_SRCS} - ${CMAKE_CURRENT_SOURCE_DIR}/t_ph5basic.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_file.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_dset.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_mdset.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_chunk.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_span_tree.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_prop.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_file_image.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_md_read.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_chunk_alloc.c - ${CMAKE_CURRENT_SOURCE_DIR}/t_filter_read.c - ) - endif () - - add_executable (h5_api_test_parallel_${api_test_extra} 
${HDF5_API_PAR_TEST_EXTRA_SRCS}) - target_include_directories ( - h5_api_test_parallel_${api_test_extra} - PRIVATE - "${HDF5_SRC_INCLUDE_DIRS}" - "${HDF5_TEST_PAR_DIR}" - "${HDF5_TEST_API_SRC_DIR}" - "${HDF5_TEST_API_PAR_SRC_DIR}" - "${HDF5_SRC_BINARY_DIR}" - "${HDF5_TEST_BINARY_DIR}" - "$<$:${MPI_C_INCLUDE_DIRS}>" - ) - target_compile_options ( - h5_api_test_parallel_${api_test_extra} - PRIVATE - "${HDF5_CMAKE_C_FLAGS}" - ) - target_compile_definitions ( - h5_api_test_parallel_${api_test_extra} - PRIVATE - "$<$:${HDF5_DEVELOPER_DEFS}>" - ) - if (NOT BUILD_SHARED_LIBS) - TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} STATIC) - target_link_libraries ( - h5_api_test_parallel_${api_test_extra} - PRIVATE - ${HDF5_TEST_LIB_TARGET} - ${HDF5_LIB_TARGET} - "$<$:MPI::MPI_C>" - ) - else () - TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} SHARED) - target_link_libraries ( - h5_api_test_parallel_${api_test_extra} - PRIVATE - ${HDF5_TEST_LIBSH_TARGET} - ${HDF5_LIBSH_TARGET} - "$<$:MPI::MPI_C>" - ) - endif () - set_target_properties ( - h5_api_test_parallel_${api_test_extra} - PROPERTIES - FOLDER test/par/API - ) - # Add Target to clang-format - if (HDF5_ENABLE_FORMATTERS) - clang_format (HDF5_TEST_h5_api_test_parallel_${api_test_extra}_FORMAT h5_api_test_parallel_${api_test_extra}) - endif () - - if (HDF5_TEST_API_INSTALL) - install ( - TARGETS - h5_api_test_parallel_${api_test_extra} - EXPORT - ${HDF5_EXPORTED_TARGETS} - DESTINATION - ${HDF5_INSTALL_BIN_DIR} - PERMISSIONS - OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE - COMPONENT - tests - ) - endif () -endforeach () - #----------------------------------------------------------------------------- # Add tests if HDF5 parallel testing is enabled #----------------------------------------------------------------------------- if (HDF5_TEST_PARALLEL) + # Setup working directories for any external VOL connectors to be tested + foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) + file (MAKE_DIRECTORY "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}") + endforeach () + if (HDF5_TEST_API_ENABLE_DRIVER) if ("${HDF5_TEST_API_SERVER}" STREQUAL "") message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.") @@ -259,6 +160,7 @@ if (HDF5_TEST_PARALLEL) ) endif () + # Add main API tests to test suite set (last_api_test "") foreach (api_test ${HDF5_API_TESTS}) add_test ( @@ -275,17 +177,6 @@ if (HDF5_TEST_PARALLEL) set (last_api_test "h5_api_test_parallel_${api_test}") endforeach () - foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) - add_test ( - NAME "h5_api_test_parallel_${hdf5_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - endforeach () - # Hook external tests to same test suite foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS}) add_test ( @@ -298,97 +189,103 @@ if (HDF5_TEST_PARALLEL) ) endforeach () - # Add tests for each external VOL connector that was built - foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) - # Determine whether connector should be tested with parallel tests - get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL) - if (${vol_test_parallel}) - # Determine environment variables that need to be set for testing - set (vol_test_env "") - set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - - get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) - list (APPEND vol_test_env 
"HDF5_VOL_CONNECTOR=${vol_test_string}") - - get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) - foreach (lib_target ${vol_lib_targets}) - get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) - if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" - AND NOT "${lib_target_output_dir}" STREQUAL "" - AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") - endif () - endforeach () - - list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") - - # Add main API tests - set (last_api_test "") - foreach (api_test ${HDF5_API_TESTS}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ "${api_test}" - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_parallel_${api_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - DEPENDS - "${last_api_test}" - ) - - set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}") - endforeach () - - # Add any extra HDF5 tests - foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - ) - endforeach () - - # Hook external tests to same test suite - foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS}) - add_test ( - NAME "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}" - COMMAND $ - --server ${HDF5_TEST_API_SERVER} - --client $ - --serial - ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - ) - endforeach () - endif () - endforeach () + if (BUILD_SHARED_LIBS) + # Add tests for each external VOL connector that was built, + # but only if executables that were linked to a shared HDF5 + # library are available, since static executables will cause + # issues when VOL connectors are loaded dynamically + foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) + # Determine whether connector should be tested with parallel tests + get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL) + if (${vol_test_parallel}) + # Determine environment variables that need to be set for testing + set (vol_test_env "") + set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + + get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) + list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") + + get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) + foreach (lib_target ${vol_lib_targets}) + get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) + if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" + AND NOT "${lib_target_output_dir}" STREQUAL "" + AND NOT "${lib_target_output_dir}" STREQUAL 
"${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") + endif () + endforeach () + + list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") + + # Add main API tests to test suite + set (last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ "${api_test}" + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_parallel_${api_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + DEPENDS + "${last_api_test}" + ) + + set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}") + endforeach () + + # Add any extra HDF5 tests to test suite + foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + ) + endforeach () + + # Hook external tests to same test suite + foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS}) + add_test ( + NAME "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}" + COMMAND $ + --server ${HDF5_TEST_API_SERVER} + --client $ + --serial + ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_ext_test_parallel_${ext_api_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + ) + endforeach () + endif () + endforeach () + endif () else () + # Add main API tests to test suite set (last_api_test "") foreach (api_test ${HDF5_API_TESTS}) add_test ( @@ -403,80 +300,117 @@ if (HDF5_TEST_PARALLEL) set (last_api_test "h5_api_test_parallel_${api_test}") endforeach () - foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) - add_test ( - NAME "h5_api_test_parallel_${hdf5_test}" - COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} - ${MPIEXEC_PREFLAGS} $ - ${MPIEXEC_POSTFLAGS} - ) - endforeach () - - # Add tests for each external VOL connector that was built - foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) - # Determine whether connector should be tested with parallel tests - get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL) - if (${vol_test_parallel}) - # Determine environment variables that need to be set for testing - set (vol_test_env "") - set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - - get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) - list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") - - get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) - foreach (lib_target ${vol_lib_targets}) - get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) - if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" - AND NOT "${lib_target_output_dir}" STREQUAL "" - AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") - set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") - endif () - endforeach () - - 
list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") - - # Add main API tests - set (last_api_test "") - foreach (api_test ${HDF5_API_TESTS}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}" - COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} - ${MPIEXEC_PREFLAGS} $ "${api_test}" - ${MPIEXEC_POSTFLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_parallel_${api_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - DEPENDS - "${last_api_test}" - ) - - set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}") - endforeach () - - # Add any extra HDF5 tests - foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) - add_test ( - NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" - COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} - ${MPIEXEC_PREFLAGS} $ - ${MPIEXEC_POSTFLAGS} - ) - set_tests_properties ( - "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" - PROPERTIES - ENVIRONMENT - "${vol_test_env}" - WORKING_DIRECTORY - "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" - ) - endforeach () - endif () - endforeach () + if (BUILD_SHARED_LIBS) + # Add tests for each external VOL connector that was built, + # but only if executables that were linked to a shared HDF5 + # library are available, since static executables will cause + # issues when VOL connectors are loaded dynamically + foreach (external_vol_tgt ${HDF5_EXTERNAL_VOL_TARGETS}) + # Determine whether connector should be tested with parallel tests + get_target_property (vol_test_parallel "${external_vol_tgt}" HDF5_VOL_TEST_PARALLEL) + if (${vol_test_parallel}) + # Determine environment variables that need to be set for testing + set (vol_test_env "") + set (vol_plugin_paths "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + + get_target_property (vol_test_string "${external_vol_tgt}" HDF5_VOL_NAME) + list (APPEND vol_test_env "HDF5_VOL_CONNECTOR=${vol_test_string}") + + get_target_property (vol_lib_targets "${external_vol_tgt}" HDF5_VOL_TARGETS) + foreach (lib_target ${vol_lib_targets}) + get_target_property (lib_target_output_dir "${lib_target}" LIBRARY_OUTPUT_DIRECTORY) + if (NOT "${lib_target_output_dir}" STREQUAL "lib_target_output_dir-NOTFOUND" + AND NOT "${lib_target_output_dir}" STREQUAL "" + AND NOT "${lib_target_output_dir}" STREQUAL "${CMAKE_BINARY_DIR}/${HDF5_INSTALL_BIN_DIR}") + set (vol_plugin_paths "${vol_plugin_paths}${CMAKE_SEP}${lib_target_output_dir}") + endif () + endforeach () + + list (APPEND vol_test_env "HDF5_PLUGIN_PATH=${vol_plugin_paths}") + + # Add main API tests to test suite + set (last_api_test "") + foreach (api_test ${HDF5_API_TESTS}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_parallel_${api_test}" + COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} + ${MPIEXEC_PREFLAGS} $ "${api_test}" + ${MPIEXEC_POSTFLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_parallel_${api_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + DEPENDS + "${last_api_test}" + ) + + set (last_api_test "${external_vol_tgt}-h5_api_test_parallel_${api_test}") + endforeach () + + # Add any extra HDF5 tests to test suite + foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA}) + add_test ( + NAME "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" + COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} + ${MPIEXEC_PREFLAGS} $ + 
${MPIEXEC_POSTFLAGS} + ) + set_tests_properties ( + "${external_vol_tgt}-h5_api_test_parallel_${hdf5_test}" + PROPERTIES + ENVIRONMENT + "${vol_test_env}" + WORKING_DIRECTORY + "${HDF5_TEST_BINARY_DIR}/${external_vol_tgt}" + ) + endforeach () + endif () + endforeach () + endif () endif () endif () + +#----------------------------------------------------------------------------- +# Install the main API test executable and any +# extra HDF5 tests if requested +#----------------------------------------------------------------------------- +if (HDF5_EXPORTED_TARGETS AND HDF5_TEST_API_INSTALL) + install ( + TARGETS + h5_api_test_parallel + EXPORT + ${HDF5_EXPORTED_TARGETS} + DESTINATION + ${HDF5_INSTALL_BIN_DIR} + PERMISSIONS + OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + COMPONENT + tests + ) + + foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA}) + if (TARGET ${api_test_extra}) + set_target_properties ( + ${api_test_extra} + PROPERTIES + OUTPUT_NAME "h5_api_test_parallel_${api_test_extra}" + ) + install ( + TARGETS + ${api_test_extra} + EXPORT + ${HDF5_EXPORTED_TARGETS} + DESTINATION + ${HDF5_INSTALL_BIN_DIR} + PERMISSIONS + OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + COMPONENT + tests + ) + endif () + endforeach () +endif () diff --git a/testpar/API/H5_api_dataset_test_parallel.c b/testpar/API/H5_api_dataset_test_parallel.c index 0d53d449e6d..169d5945e13 100644 --- a/testpar/API/H5_api_dataset_test_parallel.c +++ b/testpar/API/H5_api_dataset_test_parallel.c @@ -82,7 +82,6 @@ static int (*par_dataset_tests[])(void) = { * hyperslab selections and point selections. */ #define DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK 3 -#define DATASET_WRITE_DATA_VERIFY_TEST_NUM_POINTS 10 #define DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE H5T_NATIVE_INT #define DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE sizeof(int) #define DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME "dataset_write_data_verification_test" @@ -2142,11 +2141,13 @@ test_write_dataset_one_proc_all_selection(void) * * XXX: Currently pulls from invalid memory locations. */ +#ifdef BROKEN #define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK 2 #define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT #define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE sizeof(int) #define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME "hyper_sel_file_all_sel_mem_write_test" #define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME "hyper_sel_file_all_sel_mem_dset" +#endif static int test_write_dataset_hyper_file_all_mem(void) { diff --git a/testpar/API/t_bigio.c b/testpar/API/t_bigio.c deleted file mode 100644 index e7bdfb0f0e8..00000000000 --- a/testpar/API/t_bigio.c +++ /dev/null @@ -1,1938 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#include "hdf5.h" -#include "testphdf5.h" - -#if 0 -#include "H5Dprivate.h" /* For Chunk tests */ -#endif - -/* FILENAME and filenames must have the same number of names */ -const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL}; - -/* Constants definitions */ -#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ - -/* Define some handy debugging shorthands, routines, ... */ -/* debugging tools */ - -#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */ - -/* Constants definitions */ -#define RANK 2 - -#define IN_ORDER 1 -#define OUT_OF_ORDER 2 - -#define DATASET1 "DSET1" -#define DATASET2 "DSET2" -#define DATASET3 "DSET3" -#define DATASET4 "DSET4" -#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ -#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ -#define DXFER_BIGCOUNT (1 << 29) - -#define HYPER 1 -#define POINT 2 -#define ALL 3 - -/* Dataset data type. Int's can be easily octo dumped. */ -typedef hsize_t B_DATATYPE; - -int facc_type = FACC_MPIO; /*Test file access type */ -int dxfer_coll_type = DXFER_COLLECTIVE_IO; -size_t bigcount = (size_t) /* DXFER_BIGCOUNT */ 1310720; -int nerrors = 0; -static int mpi_size_g, mpi_rank_g; - -hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096 -hsize_t space_dim2 = SPACE_DIM2; - -static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, - int file_selection, int mem_selection, int mode); - -/* - * Setup the coordinates for point selection. - */ -static void -set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order) -{ - hsize_t i, j, k = 0, m, n, s1, s2; - - if (OUT_OF_ORDER == order) - k = (num_points * RANK) - 1; - else if (IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for (i = 0; i < count[0]; i++) - for (j = 0; j < count[1]; j++) - for (m = 0; m < block[0]; m++) - for (n = 0; n < block[1]; n++) - if (OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if (IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } -} - -/* - * Fill the dataset with trivial data for testing. - * Assume dimension rank is 2 and data is stored contiguous. - */ -static void -fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset) -{ - B_DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* put some trivial data in the data_array */ - for (i = 0; i < block[0]; i++) { - for (j = 0; j < block[1]; j++) { - *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1)); - dataptr++; - } - } -} - -/* - * Setup the coordinates for point selection. 
- */ -void -point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order) -{ - hsize_t i, j, k = 0, m, n, s1, s2; - - HDcompile_assert(RANK == 2); - - if (OUT_OF_ORDER == order) - k = (num_points * RANK) - 1; - else if (IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for (i = 0; i < count[0]; i++) - for (j = 0; j < count[1]; j++) - for (m = 0; m < block[0]; m++) - for (n = 0; n < block[1]; n++) - if (OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if (IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } - - if (VERBOSE_MED) { - printf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "total datapoints=%" PRIuHSIZE "\n", - start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1], - block[0] * block[1] * count[0] * count[1]); - k = 0; - for (i = 0; i < num_points; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); - k += 2; - } - } -} - -/* - * Print the content of the dataset. - */ -static void -dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset) -{ - B_DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* print the column heading */ - printf("%-8s", "Cols:"); - for (j = 0; j < block[1]; j++) { - printf("%3" PRIuHSIZE " ", start[1] + j); - } - printf("\n"); - - /* print the slab data */ - for (i = 0; i < block[0]; i++) { - printf("Row %2" PRIuHSIZE ": ", i + start[0]); - for (j = 0; j < block[1]; j++) { - printf("%" PRIuHSIZE " ", *dataptr++); - } - printf("\n"); - } -} - -/* - * Print the content of the dataset. - */ -static int -verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset, - B_DATATYPE *original) -{ - hsize_t i, j; - int vrfyerrs; - - /* print it if VERBOSE_MED */ - if (VERBOSE_MED) { - printf("verify_data dumping:::\n"); - printf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "count(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), " - "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n", - start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]); - printf("original values:\n"); - dataset_print(start, block, original); - printf("compared values:\n"); - dataset_print(start, block, dataset); - } - - vrfyerrs = 0; - for (i = 0; i < block[0]; i++) { - for (j = 0; j < block[1]; j++) { - if (*dataset != *original) { - if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { - printf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]" - "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): " - "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n", - i, j, i + start[0], j + start[1], *(original), *(dataset)); - } - dataset++; - original++; - } - } - } - if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if (vrfyerrs) - printf("%d errors found in verify_data\n", vrfyerrs); - return (vrfyerrs); -} - -/* Set up the selection */ -static void -ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - int mode) -{ - - switch (mode) { - - case BYROW_CONT: - /* Each process takes a slabs of rows. 
*/ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = space_dim1; - count[1] = space_dim2; - start[0] = (hsize_t)mpi_rank * count[0]; - start[1] = 0; - - break; - - case BYROW_DISCONT: - /* Each process takes several disjoint blocks. */ - block[0] = 1; - block[1] = 1; - stride[0] = 3; - stride[1] = 3; - count[0] = space_dim1 / (stride[0] * block[0]); - count[1] = (space_dim2) / (stride[1] * block[1]); - start[0] = space_dim1 * (hsize_t)mpi_rank; - start[1] = 0; - - break; - - case BYROW_SELECTNONE: - /* Each process takes a slabs of rows, there are - no selections for the last process. */ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1); - count[1] = space_dim2; - start[0] = (hsize_t)mpi_rank * count[0]; - start[1] = 0; - - break; - - case BYROW_SELECTUNBALANCE: - /* The first one-third of the number of processes only - select top half of the domain, The rest will select the bottom - half of the domain. */ - - block[0] = 1; - count[0] = 2; - stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1); - block[1] = space_dim2; - count[1] = 1; - start[1] = 0; - stride[1] = 1; - if ((mpi_rank * 3) < (mpi_size * 2)) - start[0] = (hsize_t)mpi_rank; - else - start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3); - break; - - case BYROW_SELECTINCHUNK: - /* Each process will only select one chunk */ - - block[0] = 1; - count[0] = 1; - start[0] = (hsize_t)mpi_rank * space_dim1; - stride[0] = 1; - block[1] = space_dim2; - count[1] = 1; - stride[1] = 1; - start[1] = 0; - - break; - - default: - /* Unknown mode. Set it to cover the whole dataset. */ - block[0] = space_dim1 * (hsize_t)mpi_size; - block[1] = space_dim2; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; - - break; - } - if (VERBOSE_MED) { - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " - "datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - } -} - -/* - * Fill the dataset with trivial data for testing. - * Assume dimension rank is 2. - */ -static void -ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset, - int mem_selection) -{ - DATATYPE *dataptr = dataset; - DATATYPE *tmptr; - hsize_t i, j, k1, k2, k = 0; - /* put some trivial data in the data_array */ - tmptr = dataptr; - - /* assign the disjoint block (two-dimensional)data array value - through the pointer */ - - for (k1 = 0; k1 < count[0]; k1++) { - for (i = 0; i < block[0]; i++) { - for (k2 = 0; k2 < count[1]; k2++) { - for (j = 0; j < block[1]; j++) { - - if (ALL != mem_selection) { - dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] + - k2 * stride[1] + j); - } - else { - dataptr = tmptr + k; - k++; - } - - *dataptr = (DATATYPE)(k1 + k2 + i + j); - } - } - } - } -} - -/* - * Print the first block of the content of the dataset. 
- */ -static void -ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) - -{ - DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* print the column heading */ - printf("Print only the first block of the dataset\n"); - printf("%-8s", "Cols:"); - for (j = 0; j < block[1]; j++) { - printf("%3lu ", (unsigned long)(start[1] + j)); - } - printf("\n"); - - /* print the slab data */ - for (i = 0; i < block[0]; i++) { - printf("Row %2lu: ", (unsigned long)(i + start[0])); - for (j = 0; j < block[1]; j++) { - printf("%03d ", *dataptr++); - } - printf("\n"); - } -} - -/* - * Print the content of the dataset. - */ -static int -ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, - DATATYPE *original, int mem_selection) -{ - hsize_t i, j, k1, k2, k = 0; - int vrfyerrs; - DATATYPE *dataptr, *oriptr; - - /* print it if VERBOSE_MED */ - if (VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - ccdataset_print(start, block, original); - printf("compared values:\n"); - ccdataset_print(start, block, dataset); - } - - vrfyerrs = 0; - - for (k1 = 0; k1 < count[0]; k1++) { - for (i = 0; i < block[0]; i++) { - for (k2 = 0; k2 < count[1]; k2++) { - for (j = 0; j < block[1]; j++) { - if (ALL != mem_selection) { - dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] + - k2 * stride[1] + j); - oriptr = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] + - k2 * stride[1] + j); - } - else { - dataptr = dataset + k; - oriptr = original + k; - k++; - } - if (*dataptr != *oriptr) { - if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { - printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); - } - } - } - } - } - } - if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if (vrfyerrs) - printf("%d errors found in ccdataset_vrfy\n", vrfyerrs); - return (vrfyerrs); -} - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and - * each process controls a hyperslab within.] 
- */ - -static void -dataset_big_write(void) -{ - - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset; - hsize_t dims[RANK]; /* dataset dim sizes */ - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - hsize_t *coords = NULL; - herr_t ret; /* Generic return value */ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - size_t num_points; - B_DATATYPE *wdata; - - /* allocate memory for data buffer */ - wdata = (B_DATATYPE *)malloc(bigcount * sizeof(B_DATATYPE)); - VRFY_G((wdata != NULL), "wdata malloc succeeded"); - - /* setup file access template */ - acc_tpl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS"); - H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL); - - /* create the file collectively */ - fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY_G((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY_G((ret >= 0), ""); - - /* Each process takes a slabs of rows. */ - if (mpi_rank_g == 0) - printf("\nTesting Dataset1 write by ROW\n"); - /* Create a large dataset */ - dims[0] = bigcount; - dims[1] = (hsize_t)mpi_size_g; - - sid = H5Screate_simple(RANK, dims, NULL); - VRFY_G((sid >= 0), "H5Screate_simple succeeded"); - dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); - H5Sclose(sid); - - block[0] = dims[0] / (hsize_t)mpi_size_g; - block[1] = dims[1]; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)mpi_rank_g * block[0]; - start[1] = 0; - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY_G((mem_dataspace >= 0), ""); - - /* fill the local slab with some trivial data */ - fill_datasets(start, block, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, wdata); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); - VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - /* Each process takes a slabs of cols. 
*/ - if (mpi_rank_g == 0) - printf("\nTesting Dataset2 write by COL\n"); - /* Create a large dataset */ - dims[0] = bigcount; - dims[1] = (hsize_t)mpi_size_g; - - sid = H5Screate_simple(RANK, dims, NULL); - VRFY_G((sid >= 0), "H5Screate_simple succeeded"); - dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); - H5Sclose(sid); - - block[0] = dims[0]; - block[1] = dims[1] / (hsize_t)mpi_size_g; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank_g * block[1]; - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY_G((mem_dataspace >= 0), ""); - - /* fill the local slab with some trivial data */ - fill_datasets(start, block, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, wdata); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); - VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - /* ALL selection */ - if (mpi_rank_g == 0) - printf("\nTesting Dataset3 write select ALL proc 0, NONE others\n"); - /* Create a large dataset */ - dims[0] = bigcount; - dims[1] = 1; - - sid = H5Screate_simple(RANK, dims, NULL); - VRFY_G((sid >= 0), "H5Screate_simple succeeded"); - dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); - H5Sclose(sid); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - if (mpi_rank_g == 0) { - ret = H5Sselect_all(file_dataspace); - VRFY_G((ret >= 0), "H5Sset_all succeeded"); - } - else { - ret = H5Sselect_none(file_dataspace); - VRFY_G((ret >= 0), "H5Sset_none succeeded"); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, dims, NULL); - VRFY_G((mem_dataspace >= 0), ""); - if (mpi_rank_g != 0) { - ret = H5Sselect_none(mem_dataspace); - VRFY_G((ret >= 0), "H5Sset_none succeeded"); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - /* fill the local slab with some trivial data */ - fill_datasets(start, dims, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - } - - ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); - VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - /* Point selection */ - if (mpi_rank_g == 0) - printf("\nTesting Dataset4 write point selection\n"); - /* Create a large dataset */ - dims[0] = bigcount; - dims[1] = (hsize_t)(mpi_size_g * 4); - - sid = H5Screate_simple(RANK, dims, NULL); - VRFY_G((sid >= 0), "H5Screate_simple succeeded"); - dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dcreate2 succeeded"); - H5Sclose(sid); - - block[0] = dims[0] / 2; - block[1] = 2; - stride[0] = dims[0] / 2; - stride[1] = 2; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g; - - num_points = bigcount; - - coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t)); - VRFY_G((coords != NULL), "coords malloc succeeded"); - - set_coords(start, count, stride, block, num_points, coords, IN_ORDER); - /* create a file dataspace */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY_G((ret >= 0), "H5Sselect_elements succeeded"); - - if (coords) - free(coords); - - fill_datasets(start, block, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, wdata); - } - - /* create a memory dataspace */ - /* Warning: H5Screate_simple requires an array of hsize_t elements - * even if we only pass only a single value. Attempting anything else - * appears to cause problems with 32 bit compilers. - */ - mem_dataspace = H5Screate_simple(1, dims, NULL); - VRFY_G((mem_dataspace >= 0), ""); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata); - VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded"); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - free(wdata); - H5Fclose(fid); -} - -/* - * Example of using the parallel HDF5 library to read two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and - * each process controls a hyperslab within.] 
- */ - -static void -dataset_big_read(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset; - B_DATATYPE *rdata = NULL; /* data buffer */ - B_DATATYPE *wdata = NULL; /* expected data buffer */ - hsize_t dims[RANK]; /* dataset dim sizes */ - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - size_t num_points; - hsize_t *coords = NULL; - herr_t ret; /* Generic return value */ - - /* allocate memory for data buffer */ - rdata = (B_DATATYPE *)malloc(bigcount * sizeof(B_DATATYPE)); - VRFY_G((rdata != NULL), "rdata malloc succeeded"); - wdata = (B_DATATYPE *)malloc(bigcount * sizeof(B_DATATYPE)); - VRFY_G((wdata != NULL), "wdata malloc succeeded"); - - memset(rdata, 0, bigcount * sizeof(B_DATATYPE)); - - /* setup file access template */ - acc_tpl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS"); - H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL); - - /* open the file collectively */ - fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl); - VRFY_G((fid >= 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY_G((ret >= 0), ""); - - if (mpi_rank_g == 0) - printf("\nRead Testing Dataset1 by COL\n"); - - dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); - - dims[0] = bigcount; - dims[1] = (hsize_t)mpi_size_g; - /* Each process takes a slabs of cols. */ - block[0] = dims[0]; - block[1] = dims[1] / (hsize_t)mpi_size_g; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank_g * block[1]; - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY_G((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - fill_datasets(start, block, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); - VRFY_G((ret >= 0), "H5Dread dataset1 succeeded"); - - /* verify the read data with original expected data */ - ret = verify_data(start, count, stride, block, rdata, wdata); - if (ret) { - fprintf(stderr, "verify failed\n"); - exit(1); - } - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - if (mpi_rank_g == 0) - printf("\nRead Testing Dataset2 by ROW\n"); - memset(rdata, 0, bigcount * sizeof(B_DATATYPE)); - dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); - - dims[0] = bigcount; - dims[1] = (hsize_t)mpi_size_g; - /* Each process takes a slabs of rows. */ - block[0] = dims[0] / (hsize_t)mpi_size_g; - block[1] = dims[1]; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)mpi_rank_g * block[0]; - start[1] = 0; - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY_G((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - fill_datasets(start, block, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); - VRFY_G((ret >= 0), "H5Dread dataset2 succeeded"); - - /* verify the read data with original expected data */ - ret = verify_data(start, count, stride, block, rdata, wdata); - if (ret) { - fprintf(stderr, "verify failed\n"); - exit(1); - } - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - if (mpi_rank_g == 0) - printf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n"); - memset(rdata, 0, bigcount * sizeof(B_DATATYPE)); - dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); - - dims[0] = bigcount; - dims[1] = 1; - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - if (mpi_rank_g == 0) { - ret = H5Sselect_all(file_dataspace); - VRFY_G((ret >= 0), "H5Sset_all succeeded"); - } - else { - ret = H5Sselect_none(file_dataspace); - VRFY_G((ret >= 0), "H5Sset_none succeeded"); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, dims, NULL); - VRFY_G((mem_dataspace >= 0), ""); - if (mpi_rank_g != 0) { - ret = H5Sselect_none(mem_dataspace); - VRFY_G((ret >= 0), "H5Sset_none succeeded"); - } - - /* fill dataset with test data */ - fill_datasets(start, dims, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); - VRFY_G((ret >= 0), "H5Dread dataset3 succeeded"); - - if (mpi_rank_g == 0) { - /* verify the read data with original expected data */ - ret = verify_data(start, count, stride, block, rdata, wdata); - if (ret) { - fprintf(stderr, "verify failed\n"); - exit(1); - } - } - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - if (mpi_rank_g == 0) - printf("\nRead Testing Dataset4 with Point selection\n"); - dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT); - VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); - - dims[0] = bigcount; - dims[1] = (hsize_t)(mpi_size_g * 4); - - block[0] = dims[0] / 2; - block[1] = 2; - stride[0] = dims[0] / 2; - stride[1] = 2; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g; - - fill_datasets(start, block, wdata); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, wdata); - } - - num_points = bigcount; - - coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t)); - VRFY_G((coords != NULL), "coords malloc succeeded"); - - set_coords(start, count, stride, block, num_points, coords, IN_ORDER); - /* create a file dataspace */ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY_G((ret >= 0), "H5Sselect_elements succeeded"); - - if (coords) - free(coords); - - /* create a memory dataspace */ - /* Warning: H5Screate_simple requires an array of hsize_t elements - * even if we only pass only a single value. 
Attempting anything else - * appears to cause problems with 32 bit compilers. - */ - mem_dataspace = H5Screate_simple(1, dims, NULL); - VRFY_G((mem_dataspace >= 0), ""); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); - VRFY_G((ret >= 0), "H5Dread dataset1 succeeded"); - - ret = verify_data(start, count, stride, block, rdata, wdata); - if (ret) { - fprintf(stderr, "verify failed\n"); - exit(1); - } - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - - free(wdata); - free(rdata); - - wdata = NULL; - rdata = NULL; - /* We never wrote Dataset5 in the write section, so we can't - * expect to read it... - */ - file_dataspace = -1; - mem_dataspace = -1; - xfer_plist = -1; - dataset = -1; - - /* release all temporary handles. */ - if (file_dataspace != -1) - H5Sclose(file_dataspace); - if (mem_dataspace != -1) - H5Sclose(mem_dataspace); - if (xfer_plist != -1) - H5Pclose(xfer_plist); - if (dataset != -1) { - ret = H5Dclose(dataset); - VRFY_G((ret >= 0), "H5Dclose1 succeeded"); - } - H5Fclose(fid); - - /* release data buffers */ - if (rdata) - free(rdata); - if (wdata) - free(wdata); - -} /* dataset_large_readAll */ - -static void -single_rank_independent_io(void) -{ - if (mpi_rank_g == 0) - printf("single_rank_independent_io\n"); - - if (MAIN_PROCESS) { - hsize_t dims[1]; - hid_t file_id = -1; - hid_t fapl_id = -1; - hid_t dset_id = -1; - hid_t fspace_id = -1; - herr_t ret; - int *data = NULL; - uint64_t i; - - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS"); - - H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL); - file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY_G((file_id >= 0), "H5Dcreate2 succeeded"); - - /* - * Calculate the number of elements needed to exceed - * MPI's INT_MAX limitation - */ - dims[0] = (INT_MAX / sizeof(int)) + 10; - - fspace_id = H5Screate_simple(1, dims, NULL); - VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded"); - - /* - * Create and write to a >2GB dataset from a single rank. 
- */ - dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - - VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded"); - - data = malloc(dims[0] * sizeof(int)); - - /* Initialize data */ - for (i = 0; i < dims[0]; i++) - data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT); - - /* Write data */ - ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data); - VRFY_G((ret >= 0), "H5Dwrite succeeded"); - - /* Wipe buffer */ - memset(data, 0, dims[0] * sizeof(int)); - - /* Read data back */ - ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data); - VRFY_G((ret >= 0), "H5Dread succeeded"); - - /* Verify data */ - for (i = 0; i < dims[0]; i++) - if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) { - fprintf(stderr, "verify failed\n"); - exit(1); - } - - free(data); - H5Sclose(fspace_id); - H5Dclose(dset_id); - H5Fclose(file_id); - - H5Fdelete(FILENAME[1], fapl_id); - - H5Pclose(fapl_id); - } - MPI_Barrier(MPI_COMM_WORLD); -} - -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = -1; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY_G((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY_G((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY_G((ret >= 0), ""); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY_G((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY_G((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk1 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with a single chunk - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: One big singular selection inside one chunk - * Two dimensions, - * - * dim1 = space_dim1(5760)*mpi_size - * dim2 = space_dim2(3) - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = space_dim1(5760) - * count1 = space_dim2(3) - * start0 = mpi_rank*space_dim1 - * start1 = 0 - * ------------------------------------------------------------------------ - */ - -void -coll_chunk1(void) -{ - const char *filename = FILENAME[0]; - if (mpi_rank_g == 0) - printf("coll_chunk1\n"); - - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, 
OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk2 - * - * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT - selection with a single chunk - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: many disjoint selections inside one chunk - * Two dimensions, - * - * dim1 = space_dim1*mpi_size(5760) - * dim2 = space_dim2(3) - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 3 for all dimensions - * count0 = space_dim1/stride0(5760/3) - * count1 = space_dim2/stride(3/3 = 1) - * start0 = mpi_rank*space_dim1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ -void -coll_chunk2(void) -{ - const char *filename = FILENAME[0]; - if (mpi_rank_g == 0) - printf("coll_chunk2\n"); - - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk3 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = space_dim1*mpi_size - * dim2 = space_dim2(3) - * chunk_dim1 = space_dim1 - * chunk_dim2 = dim2/2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = space_dim1 - * count1 = space_dim2(3) - * start0 = mpi_rank*space_dim1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk3(void) -{ - const char *filename = FILENAME[0]; - if (mpi_rank_g == 0) - printf("coll_chunk3\n"); - - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size_g, BYROW_CONT, 
API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); -} - -//------------------------------------------------------------------------- -// Borrowed/Modified (slightly) from t_coll_chunk.c -/*------------------------------------------------------------------------- - * Function: coll_chunktest - * - * Purpose: The real testing routine for regular selection of collective - chunking storage - testing both write and read, - If anything fails, it may be read or write. There is no - separation test between read and write. - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ - -static void -coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection, - int mem_selection, int mode) -{ - hid_t file, dataset, file_dataspace, mem_dataspace; - hid_t acc_plist, xfer_plist, crp_plist; - - hsize_t dims[RANK], chunk_dims[RANK]; - int *data_array1 = NULL; - int *data_origin1 = NULL; - - hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - unsigned prop_value; -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - herr_t status; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - - /* Create the data space */ - - acc_plist = create_faccess_plist(comm, info, facc_type); - VRFY_G((acc_plist >= 0), ""); - - file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist); - VRFY_G((file >= 0), "H5Fcreate succeeded"); - - status = H5Pclose(acc_plist); - VRFY_G((status >= 0), ""); - - /* setup dimensionality object */ - dims[0] = space_dim1 * (hsize_t)mpi_size_g; - dims[1] = space_dim2; - - /* allocate memory for data buffer */ - data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int)); - VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* set up dimensions of the slab this process accesses */ - ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor); - - /* set up the coords array selection */ - num_points = block[0] * block[1] * count[0] * count[1]; - coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t)); - VRFY_G((coords != NULL), "coords malloc succeeded"); - point_set(start, count, stride, block, num_points, coords, mode); - - /* Warning: H5Screate_simple requires an array of hsize_t elements - * even if we only pass only a single value. Attempting anything else - * appears to cause problems with 32 bit compilers. - */ - file_dataspace = H5Screate_simple(2, dims, NULL); - VRFY_G((file_dataspace >= 0), "file dataspace created succeeded"); - - if (ALL != mem_selection) { - mem_dataspace = H5Screate_simple(2, dims, NULL); - VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded"); - } - else { - /* Putting the warning about H5Screate_simple (above) into practice... 
*/ - hsize_t dsdims[1] = {num_points}; - mem_dataspace = H5Screate_simple(1, dsdims, NULL); - VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded"); - } - - crp_plist = H5Pcreate(H5P_DATASET_CREATE); - VRFY_G((crp_plist >= 0), ""); - - /* Set up chunk information. */ - chunk_dims[0] = dims[0] / (hsize_t)chunk_factor; - - /* to decrease the testing time, maintain bigger chunk size */ - (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2); - status = H5Pset_chunk(crp_plist, 2, chunk_dims); - VRFY_G((status >= 0), "chunk creation property list succeeded"); - - dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT, - crp_plist, H5P_DEFAULT); - VRFY_G((dataset >= 0), "dataset created succeeded"); - - status = H5Pclose(crp_plist); - VRFY_G((status >= 0), ""); - - /*put some trivial data in the data array */ - ccdataset_fill(start, stride, count, block, data_array1, mem_selection); - - MESG("data_array initialized"); - - switch (file_selection) { - case HYPER: - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY_G((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(file_dataspace); - VRFY_G((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(file_dataspace); - VRFY_G((status >= 0), "H5Sselect_all succeeded"); - break; - } - - switch (mem_selection) { - case HYPER: - status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY_G((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(mem_dataspace); - VRFY_G((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(mem_dataspace); - VRFY_G((status >= 0), "H5Sselect_all succeeded"); - break; - } - - /* set up the collective transfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), ""); - - status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((status >= 0), "MPIO collective transfer property succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((status >= 0), "set independent IO collectively succeeded"); - } - - switch (api_option) { - case API_LINK_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO); - VRFY_G((status >= 0), "collective chunk optimization succeeded"); - break; - - case API_MULTI_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO); - VRFY_G((status >= 0), "collective chunk optimization succeeded "); - break; - - case API_LINK_TRUE: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2); - VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); - break; - - case API_LINK_FALSE: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6); - VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); - break; - - case API_MULTI_COLL: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it 
is using multi-chunk IO */ - VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50); - VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded"); - break; - - case API_MULTI_IND: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ - VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100); - VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded"); - break; - - default:; - } - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - if (facc_type == FACC_MPIO) { - switch (api_option) { - case API_LINK_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY_G((status >= 0), "testing property list inserted succeeded"); - break; - - case API_MULTI_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY_G((status >= 0), "testing property list inserted succeeded"); - break; - - case API_LINK_TRUE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY_G((status >= 0), "testing property list inserted succeeded"); - break; - - case API_LINK_FALSE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY_G((status >= 0), "testing property list inserted succeeded"); - break; - - case API_MULTI_COLL: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, - H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY_G((status >= 0), "testing property list inserted succeeded"); - break; - - case API_MULTI_IND: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY_G((status >= 0), "testing property list inserted succeeded"); - break; - - default:; - } - } -#endif - - /* write data collectively */ - status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY_G((status >= 0), "dataset write succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - if (facc_type == FACC_MPIO) { - switch (api_option) { - case API_LINK_HARD: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); - VRFY_G((status >= 0), "testing property list get succeeded"); - VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded"); - break; - - case API_MULTI_HARD: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value); - VRFY_G((status >= 0), "testing property list get succeeded"); - VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); - break; - - case API_LINK_TRUE: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value); - VRFY_G((status >= 0), "testing property list get succeeded"); - VRFY_G((prop_value == 
0), "API to set LINK COLLECTIVE IO succeeded"); - break; - - case API_LINK_FALSE: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value); - VRFY_G((status >= 0), "testing property list get succeeded"); - VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded"); - break; - - case API_MULTI_COLL: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value); - VRFY_G((status >= 0), "testing property list get succeeded"); - VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded"); - break; - - case API_MULTI_IND: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value); - VRFY_G((status >= 0), "testing property list get succeeded"); - VRFY_G((prop_value == 0), - "API to set MULTI-CHUNK IO transferring to independent IO succeeded"); - break; - - default:; - } - } -#endif - - status = H5Dclose(dataset); - VRFY_G((status >= 0), ""); - - status = H5Pclose(xfer_plist); - VRFY_G((status >= 0), "property list closed"); - - status = H5Sclose(file_dataspace); - VRFY_G((status >= 0), ""); - - status = H5Sclose(mem_dataspace); - VRFY_G((status >= 0), ""); - - status = H5Fclose(file); - VRFY_G((status >= 0), ""); - - if (data_array1) - free(data_array1); - - /* Use collective read to verify the correctness of collective write. */ - - /* allocate memory for data buffer */ - data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int)); - VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* allocate memory for data buffer */ - data_origin1 = (int *)malloc(dims[0] * dims[1] * sizeof(int)); - VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - acc_plist = create_faccess_plist(comm, info, facc_type); - VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded"); - - file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist); - VRFY_G((file >= 0), "H5Fcreate succeeded"); - - status = H5Pclose(acc_plist); - VRFY_G((status >= 0), ""); - - /* open the collective dataset*/ - dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT); - VRFY_G((dataset >= 0), ""); - - /* set up dimensions of the slab this process accesses */ - ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor); - - /* obtain the file and mem dataspace*/ - file_dataspace = H5Dget_space(dataset); - VRFY_G((file_dataspace >= 0), ""); - - if (ALL != mem_selection) { - mem_dataspace = H5Dget_space(dataset); - VRFY_G((mem_dataspace >= 0), ""); - } - else { - /* Warning: H5Screate_simple requires an array of hsize_t elements - * even if we only pass only a single value. Attempting anything else - * appears to cause problems with 32 bit compilers. 
- */ - hsize_t dsdims[1] = {num_points}; - mem_dataspace = H5Screate_simple(1, dsdims, NULL); - VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded"); - } - - switch (file_selection) { - case HYPER: - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY_G((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(file_dataspace); - VRFY_G((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(file_dataspace); - VRFY_G((status >= 0), "H5Sselect_all succeeded"); - break; - } - - switch (mem_selection) { - case HYPER: - status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY_G((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY_G((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(mem_dataspace); - VRFY_G((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(mem_dataspace); - VRFY_G((status >= 0), "H5Sselect_all succeeded"); - break; - } - - /* fill dataset with test data */ - ccdataset_fill(start, stride, count, block, data_origin1, mem_selection); - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY_G((xfer_plist >= 0), ""); - - status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY_G((status >= 0), "MPIO collective transfer property succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY_G((status >= 0), "set independent IO collectively succeeded"); - } - - status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY_G((status >= 0), "dataset read succeeded"); - - /* verify the read data with original expected data */ - status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); - if (status) - nerrors++; - - status = H5Pclose(xfer_plist); - VRFY_G((status >= 0), "property list closed"); - - /* close dataset collectively */ - status = H5Dclose(dataset); - VRFY_G((status >= 0), "H5Dclose"); - - /* release all IDs created */ - status = H5Sclose(file_dataspace); - VRFY_G((status >= 0), "H5Sclose"); - - status = H5Sclose(mem_dataspace); - VRFY_G((status >= 0), "H5Sclose"); - - /* close the file collectively */ - status = H5Fclose(file); - VRFY_G((status >= 0), "H5Fclose"); - - /* release data buffers */ - if (coords) - free(coords); - if (data_array1) - free(data_array1); - if (data_origin1) - free(data_origin1); -} - -int -main(int argc, char **argv) -{ - hid_t acc_plist = H5I_INVALID_HID; - - MPI_Init(&argc, &argv); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g); - - /* Attempt to turn off atexit post processing so that in case errors - * happen during the test and the process is aborted, it will not get - * hung in the atexit post processing in which it may try to make MPI - * calls. By then, MPI calls may not work. - */ - if (H5dont_atexit() < 0) - printf("Failed to turn off atexit processing. Continue.\n"); - - /* set alarm. 
*/ - /* TestAlarmOn(); */ - - acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - - /* Get the capability flag of the VOL connector being used */ - if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) { - if (MAIN_PROCESS) - printf("Failed to get the capability flag of the VOL connector being used\n"); - - MPI_Finalize(); - return 0; - } - - /* Make sure the connector supports the API functions being tested. This test only - * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close, - * and H5Dget_space. */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAIN_PROCESS) - printf( - "API functions for basic file, dataset basic or more aren't supported with this connector\n"); - - MPI_Finalize(); - return 0; - } - - dataset_big_write(); - MPI_Barrier(MPI_COMM_WORLD); - - dataset_big_read(); - MPI_Barrier(MPI_COMM_WORLD); - - coll_chunk1(); - MPI_Barrier(MPI_COMM_WORLD); - coll_chunk2(); - MPI_Barrier(MPI_COMM_WORLD); - coll_chunk3(); - MPI_Barrier(MPI_COMM_WORLD); - - single_rank_independent_io(); - - /* turn off alarm */ - /* TestAlarmOff(); */ - - if (mpi_rank_g == 0) { - hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS); - - H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL); - - H5E_BEGIN_TRY - { - H5Fdelete(FILENAME[0], fapl_id); - H5Fdelete(FILENAME[1], fapl_id); - } - H5E_END_TRY - - H5Pclose(fapl_id); - } - - H5Pclose(acc_plist); - - /* close HDF5 library */ - H5close(); - - MPI_Finalize(); - - return 0; -} diff --git a/testpar/API/t_chunk_alloc.c b/testpar/API/t_chunk_alloc.c deleted file mode 100644 index 673563b4f9e..00000000000 --- a/testpar/API/t_chunk_alloc.c +++ /dev/null @@ -1,507 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * This verifies if the storage space allocation methods are compatible between - * serial and parallel modes. - */ - -#include "hdf5.h" -#include "testphdf5.h" -static int mpi_size, mpi_rank; - -#define DSET_NAME "ExtendibleArray" -#define CHUNK_SIZE 1000 /* #elements per chunk */ -#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */ -#define CLOSE 1 -#define NO_CLOSE 0 - -#if 0 -static MPI_Offset -get_filesize(const char *filename) -{ - int mpierr; - MPI_File fd; - MPI_Offset filesize; - - mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd); - VRFY((mpierr == MPI_SUCCESS), ""); - - mpierr = MPI_File_get_size(fd, &filesize); - VRFY((mpierr == MPI_SUCCESS), ""); - - mpierr = MPI_File_close(&fd); - VRFY((mpierr == MPI_SUCCESS), ""); - - return (filesize); -} -#endif - -typedef enum write_pattern { none, sec_last, all } write_type; - -typedef enum access_ { write_all, open_only, extend_only } access_type; - -/* - * This creates a dataset serially with chunks, each of CHUNK_SIZE - * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. 
Another - * routine will open this in parallel for extension test. - */ -static void -create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern) -{ - hid_t file_id, dataset; /* handles */ - hid_t dataspace, memspace; - hid_t cparms; - hsize_t dims[1]; - hsize_t maxdims[1] = {H5S_UNLIMITED}; - - hsize_t chunk_dims[1] = {CHUNK_SIZE}; - hsize_t count[1]; - hsize_t stride[1]; - hsize_t block[1]; - hsize_t offset[1]; /* Selection offset within dataspace */ - /* Variables used in reading data back */ - char buffer[CHUNK_SIZE]; - long nchunks; - herr_t hrc; -#if 0 - MPI_Offset filesize, /* actual file size */ - est_filesize; /* estimated file size */ -#endif - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Only MAINPROCESS should create the file. Others just wait. */ - if (MAINPROCESS) { - nchunks = chunk_factor * mpi_size; - dims[0] = (hsize_t)(nchunks * CHUNK_SIZE); - /* Create the data space with unlimited dimensions. */ - dataspace = H5Screate_simple(1, dims, maxdims); - VRFY((dataspace >= 0), ""); - - memspace = H5Screate_simple(1, chunk_dims, NULL); - VRFY((memspace >= 0), ""); - - /* Create a new file. If file exists its contents will be overwritten. */ - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY((file_id >= 0), "H5Fcreate"); - - /* Modify dataset creation properties, i.e. enable chunking */ - cparms = H5Pcreate(H5P_DATASET_CREATE); - VRFY((cparms >= 0), ""); - - hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY); - VRFY((hrc >= 0), ""); - - hrc = H5Pset_chunk(cparms, 1, chunk_dims); - VRFY((hrc >= 0), ""); - - /* Create a new dataset within the file using cparms creation properties. */ - dataset = - H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT); - VRFY((dataset >= 0), ""); - - if (write_pattern == sec_last) { - memset(buffer, 100, CHUNK_SIZE); - - count[0] = 1; - stride[0] = 1; - block[0] = chunk_dims[0]; - offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0]; - - hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); - VRFY((hrc >= 0), ""); - - /* Write sec_last chunk */ - hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); - VRFY((hrc >= 0), "H5Dwrite"); - } /* end if */ - - /* Close resources */ - hrc = H5Dclose(dataset); - VRFY((hrc >= 0), ""); - dataset = -1; - - hrc = H5Sclose(dataspace); - VRFY((hrc >= 0), ""); - - hrc = H5Sclose(memspace); - VRFY((hrc >= 0), ""); - - hrc = H5Pclose(cparms); - VRFY((hrc >= 0), ""); - - hrc = H5Fclose(file_id); - VRFY((hrc >= 0), ""); - file_id = -1; - -#if 0 - /* verify file size */ - filesize = get_filesize(filename); - est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); - VRFY((filesize >= est_filesize), "file size check"); -#endif - } - - /* Make sure all processes are done before exiting this routine. Otherwise, - * other tests may start and change the test data file before some processes - * of this test are still accessing the file. - */ - - MPI_Barrier(MPI_COMM_WORLD); -} - -/* - * This program performs three different types of parallel access. It writes on - * the entire dataset, it extends the dataset to nchunks*CHUNK_SIZE, and it only - * opens the dataset. At the end, it verifies the size of the dataset to be - * consistent with argument 'chunk_factor'. 
- */ -static void -parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, - hid_t *dataset) -{ - hid_t memspace, dataspace; /* HDF5 file identifier */ - hid_t access_plist; /* HDF5 ID for file access property list */ - herr_t hrc; /* HDF5 return code */ - hsize_t size[1]; - - hsize_t chunk_dims[1] = {CHUNK_SIZE}; - hsize_t count[1]; - hsize_t stride[1]; - hsize_t block[1]; - hsize_t offset[1]; /* Selection offset within dataspace */ - hsize_t dims[1]; - hsize_t maxdims[1]; - - /* Variables used in reading data back */ - char buffer[CHUNK_SIZE]; - int i; - long nchunks; -#if 0 - /* MPI Gubbins */ - MPI_Offset filesize, /* actual file size */ - est_filesize; /* estimated file size */ -#endif - - /* Initialize MPI */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - nchunks = chunk_factor * mpi_size; - - /* Set up MPIO file access property lists */ - access_plist = H5Pcreate(H5P_FILE_ACCESS); - VRFY((access_plist >= 0), ""); - - hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); - VRFY((hrc >= 0), ""); - - /* Open the file */ - if (*file_id < 0) { - *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist); - VRFY((*file_id >= 0), ""); - } - - /* Open dataset*/ - if (*dataset < 0) { - *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); - VRFY((*dataset >= 0), ""); - } - - /* Make sure all processes are done before continuing. Otherwise, one - * process could change the dataset extent before another finishes opening - * it, resulting in only some of the processes calling H5Dset_extent(). */ - MPI_Barrier(MPI_COMM_WORLD); - - memspace = H5Screate_simple(1, chunk_dims, NULL); - VRFY((memspace >= 0), ""); - - dataspace = H5Dget_space(*dataset); - VRFY((dataspace >= 0), ""); - - size[0] = (hsize_t)nchunks * CHUNK_SIZE; - - switch (action) { - - /* all chunks are written by all the processes in an interleaved way*/ - case write_all: - - memset(buffer, mpi_rank + 1, CHUNK_SIZE); - count[0] = 1; - stride[0] = 1; - block[0] = chunk_dims[0]; - for (i = 0; i < nchunks / mpi_size; i++) { - offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0]; - - hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); - VRFY((hrc >= 0), ""); - - /* Write the buffer out */ - hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); - VRFY((hrc >= 0), "H5Dwrite"); - } - - break; - - /* only extends the dataset */ - case extend_only: - /* check if new size is larger than old size */ - hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims); - VRFY((hrc >= 0), ""); - - /* Extend dataset*/ - if (size[0] > dims[0]) { - hrc = H5Dset_extent(*dataset, size); - VRFY((hrc >= 0), ""); - } - break; - - /* only opens the *dataset */ - case open_only: - break; - default: - assert(0); - } - - /* Close up */ - hrc = H5Dclose(*dataset); - VRFY((hrc >= 0), ""); - *dataset = -1; - - hrc = H5Sclose(dataspace); - VRFY((hrc >= 0), ""); - - hrc = H5Sclose(memspace); - VRFY((hrc >= 0), ""); - - hrc = H5Fclose(*file_id); - VRFY((hrc >= 0), ""); - *file_id = -1; - -#if 0 - /* verify file size */ - filesize = get_filesize(filename); - est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); - VRFY((filesize >= est_filesize), "file size check"); -#endif - - /* Can close some plists */ - hrc = H5Pclose(access_plist); - VRFY((hrc >= 0), ""); - - /* Make sure all processes are done before exiting this routine. 
Otherwise, - * other tests may start and change the test data file before some processes - * of this test are still accessing the file. - */ - MPI_Barrier(MPI_COMM_WORLD); -} - -/* - * This routine verifies the data written in the dataset. It does one of the - * three cases according to the value of parameter `write_pattern'. - * 1. it returns correct fill values though the dataset has not been written; - * 2. it still returns correct fill values though only a small part is written; - * 3. it returns correct values when the whole dataset has been written in an - * interleaved pattern. - */ -static void -verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id, - hid_t *dataset) -{ - hid_t dataspace, memspace; /* HDF5 file identifier */ - hid_t access_plist; /* HDF5 ID for file access property list */ - herr_t hrc; /* HDF5 return code */ - - hsize_t chunk_dims[1] = {CHUNK_SIZE}; - hsize_t count[1]; - hsize_t stride[1]; - hsize_t block[1]; - hsize_t offset[1]; /* Selection offset within dataspace */ - /* Variables used in reading data back */ - char buffer[CHUNK_SIZE]; - int value, i; - int index_l; - long nchunks; - /* Initialize MPI */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - nchunks = chunk_factor * mpi_size; - - /* Set up MPIO file access property lists */ - access_plist = H5Pcreate(H5P_FILE_ACCESS); - VRFY((access_plist >= 0), ""); - - hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); - VRFY((hrc >= 0), ""); - - /* Open the file */ - if (*file_id < 0) { - *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist); - VRFY((*file_id >= 0), ""); - } - - /* Open dataset*/ - if (*dataset < 0) { - *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); - VRFY((*dataset >= 0), ""); - } - - memspace = H5Screate_simple(1, chunk_dims, NULL); - VRFY((memspace >= 0), ""); - - dataspace = H5Dget_space(*dataset); - VRFY((dataspace >= 0), ""); - - /* all processes check all chunks. */ - count[0] = 1; - stride[0] = 1; - block[0] = chunk_dims[0]; - for (i = 0; i < nchunks; i++) { - /* reset buffer values */ - memset(buffer, -1, CHUNK_SIZE); - - offset[0] = (hsize_t)i * chunk_dims[0]; - - hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); - VRFY((hrc >= 0), ""); - - /* Read the chunk */ - hrc = H5Dread(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); - VRFY((hrc >= 0), "H5Dread"); - - /* set expected value according the write pattern */ - switch (write_pattern) { - case all: - value = i % mpi_size + 1; - break; - case none: - value = 0; - break; - case sec_last: - if (i == nchunks - 2) - value = 100; - else - value = 0; - break; - default: - assert(0); - } - - /* verify content of the chunk */ - for (index_l = 0; index_l < CHUNK_SIZE; index_l++) - VRFY((buffer[index_l] == value), "data verification"); - } - - hrc = H5Sclose(dataspace); - VRFY((hrc >= 0), ""); - - hrc = H5Sclose(memspace); - VRFY((hrc >= 0), ""); - - /* Can close some plists */ - hrc = H5Pclose(access_plist); - VRFY((hrc >= 0), ""); - - /* Close up */ - if (vclose) { - hrc = H5Dclose(*dataset); - VRFY((hrc >= 0), ""); - *dataset = -1; - - hrc = H5Fclose(*file_id); - VRFY((hrc >= 0), ""); - *file_id = -1; - } - - /* Make sure all processes are done before exiting this routine. Otherwise, - * other tests may start and change the test data file before some processes - * of this test are still accessing the file. 
- */ - MPI_Barrier(MPI_COMM_WORLD); -} - -/* - * Test following possible scenarios, - * Case 1: - * Sequential create a file and dataset with H5D_ALLOC_TIME_EARLY and large - * size, no write, close, reopen in parallel, read to verify all return - * the fill value. - * Case 2: - * Sequential create a file and dataset with H5D_ALLOC_TIME_EARLY but small - * size, no write, close, reopen in parallel, extend to large size, then close, - * then reopen in parallel and read to verify all return the fill value. - * Case 3: - * Sequential create a file and dataset with H5D_ALLOC_TIME_EARLY and large - * size, write just a small part of the dataset (second to the last), close, - * then reopen in parallel, read to verify all return the fill value except - * those small portion that has been written. Without closing it, writes - * all parts of the dataset in a interleave pattern, close it, and reopen - * it, read to verify all data are as written. - */ -void -test_chunk_alloc(void) -{ - const char *filename; - hid_t file_id, dataset; - - file_id = dataset = -1; - - /* Initialize MPI */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend Chunked allocation test on file %s\n", filename); - - /* Case 1 */ - /* Create chunked dataset without writing anything.*/ - create_chunked_dataset(filename, CHUNK_FACTOR, none); - /* reopen dataset in parallel and check for file size */ - parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset); - /* reopen dataset in parallel, read and verify the data */ - verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset); - - /* Case 2 */ - /* Create chunked dataset without writing anything */ - create_chunked_dataset(filename, 20, none); - /* reopen dataset in parallel and only extend it */ - parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset); - /* reopen dataset in parallel, read and verify the data */ - verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset); - - /* Case 3 */ - /* Create chunked dataset and write in the second to last chunk */ - create_chunked_dataset(filename, CHUNK_FACTOR, sec_last); - /* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/ - verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset); - /* All processes write in all the chunks in a interleaved way */ - parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset); - /* reopen dataset in parallel, read and verify the data */ - verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset); -} diff --git a/testpar/API/t_coll_chunk.c b/testpar/API/t_coll_chunk.c deleted file mode 100644 index 99f845fde55..00000000000 --- a/testpar/API/t_coll_chunk.c +++ /dev/null @@ -1,1345 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. 
The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#include "hdf5.h" -#include "testphdf5.h" - -#define HYPER 1 -#define POINT 2 -#define ALL 3 - -/* some commonly used routines for collective chunk IO tests*/ - -static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], - hsize_t block[], int mode); - -static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - DATATYPE *dataset, int mem_selection); - -static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset); - -static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - DATATYPE *dataset, DATATYPE *original, int mem_selection); - -static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, - int file_selection, int mem_selection, int mode); - -/*------------------------------------------------------------------------- - * Function: coll_chunk1 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with a single chunk - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: One big singular selection inside one chunk - * Two dimensions, - * - * dim1 = SPACE_DIM1(5760)*mpi_size - * dim2 = SPACE_DIM2(3) - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1(5760) - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * ------------------------------------------------------------------------ - */ - -void -coll_chunk1(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk2 - * - * Purpose: Wrapper to test the collective chunk IO for 
regular DISJOINT - selection with a single chunk - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: many disjoint selections inside one chunk - * Two dimensions, - * - * dim1 = SPACE_DIM1*mpi_size(5760) - * dim2 = SPACE_DIM2(3) - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 3 for all dimensions - * count0 = SPACE_DIM1/stride0(5760/3) - * count1 = SPACE_DIM2/stride(3/3 = 1) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ -void -coll_chunk2(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk3 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2(3) - * chunk_dim1 = SPACE_DIM1 - * chunk_dim2 = dim2/2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk3(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_size; - int mpi_rank; - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - 
return; - } - - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk4 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk4(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk4 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * 
chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk5(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk6 - * - * Purpose: Test direct request for multi-chunk-io. - * Wrapper to test the collective chunk IO for regular JOINT - * selection with at least number of 2*mpi_size chunks - * Test for direct to Multi Chunk I/O. 
- * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk6(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk7 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk7(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - 
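For context on the API_LINK_TRUE mode exercised by the coll_chunktest() calls below: the test harness maps it onto H5Pset_dxpl_mpio_chunk_opt_num(), which sets the per-process chunk-count threshold at which the library performs linked-chunk collective I/O. A minimal sketch of that call sequence, with an illustrative dxpl_id handle and threshold values that are not taken from the removed test, might look like:

    hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);     /* data transfer property list */
    H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); /* request collective I/O */
    /* Ask for linked-chunk collective I/O once a process touches at least 2
     * chunks; a larger threshold (the removed test uses 6 for API_LINK_FALSE)
     * is expected to fall back to multi-chunk I/O. Values here are illustrative,
     * and error checking is omitted for brevity. */
    H5Pset_dxpl_mpio_chunk_opt_num(dxpl_id, 2);
    /* ... H5Dwrite()/H5Dread() using dxpl_id ... */
    H5Pclose(dxpl_id);
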
coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk8 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk8(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk9 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many 
chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk9(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunk10 - * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT - selection with at least number of 2*mpi_size chunks - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -/* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection across many chunks - * Two dimensions, Num of chunks = 2* mpi_size - * - * dim1 = SPACE_DIM1*mpi_size - * dim2 = SPACE_DIM2 - * chunk_dim1 = dim1 - * chunk_dim2 = dim2 - * block = 1 for all dimensions - * stride = 1 for all dimensions - * count0 = SPACE_DIM1 - * count1 = SPACE_DIM2(3) - * start0 = mpi_rank*SPACE_DIM1 - * start1 = 0 - * - * ------------------------------------------------------------------------ - */ - -void -coll_chunk10(void) -{ - const char *filename = PARATESTFILE /* GetTestParameters() */; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER); - 
coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER); - - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER); -} - -/*------------------------------------------------------------------------- - * Function: coll_chunktest - * - * Purpose: The real testing routine for regular selection of collective - chunking storage - testing both write and read, - If anything fails, it may be read or write. There is no - separation test between read and write. - * - * Return: Success: 0 - * - * Failure: -1 - *------------------------------------------------------------------------- - */ - -static void -coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection, - int mem_selection, int mode) -{ - hid_t file, dataset, file_dataspace, mem_dataspace; - hid_t acc_plist, xfer_plist, crp_plist; - - hsize_t dims[RANK], chunk_dims[RANK]; - int *data_array1 = NULL; - int *data_origin1 = NULL; - - hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - unsigned prop_value; -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - int mpi_size, mpi_rank; - - herr_t status; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - hsize_t current_dims; /* for point selection */ - - /* set up MPI parameters */ - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); - - /* Create the data space */ - - acc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((acc_plist >= 0), ""); - - file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist); - VRFY((file >= 0), "H5Fcreate succeeded"); - - status = H5Pclose(acc_plist); - VRFY((status >= 0), ""); - - /* setup dimensionality object */ - dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size); - dims[1] = SPACE_DIM2; - - /* allocate memory for data buffer */ - data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* set up dimensions of the slab this process accesses */ - ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); - - /* set up the coords array selection */ - num_points = block[0] * block[1] * count[0] * count[1]; - coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t)); - VRFY((coords != NULL), "coords malloc succeeded"); - point_set(start, count, stride, block, num_points, coords, mode); - - file_dataspace = H5Screate_simple(2, dims, NULL); - VRFY((file_dataspace >= 0), "file dataspace created succeeded"); - - if (ALL != mem_selection) { - mem_dataspace = H5Screate_simple(2, dims, NULL); - VRFY((mem_dataspace >= 0), "mem dataspace created succeeded"); - } - else { - current_dims = num_points; - mem_dataspace = H5Screate_simple(1, ¤t_dims, NULL); - VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); - } - - crp_plist = H5Pcreate(H5P_DATASET_CREATE); - VRFY((crp_plist >= 0), ""); - - /* Set up chunk information. */ - chunk_dims[0] = dims[0] / (hsize_t)chunk_factor; - - /* to decrease the testing time, maintain bigger chunk size */ - (chunk_factor == 1) ? 
(chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2 / 2); - status = H5Pset_chunk(crp_plist, 2, chunk_dims); - VRFY((status >= 0), "chunk creation property list succeeded"); - - dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT, - crp_plist, H5P_DEFAULT); - VRFY((dataset >= 0), "dataset created succeeded"); - - status = H5Pclose(crp_plist); - VRFY((status >= 0), ""); - - /*put some trivial data in the data array */ - ccdataset_fill(start, stride, count, block, data_array1, mem_selection); - - MESG("data_array initialized"); - - switch (file_selection) { - case HYPER: - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(file_dataspace); - VRFY((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(file_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - switch (mem_selection) { - case HYPER: - status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(mem_dataspace); - VRFY((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(mem_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - /* set up the collective transfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - - status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((status >= 0), "MPIO collective transfer property succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((status >= 0), "set independent IO collectively succeeded"); - } - - switch (api_option) { - case API_LINK_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO); - VRFY((status >= 0), "collective chunk optimization succeeded"); - break; - - case API_MULTI_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((status >= 0), "collective chunk optimization succeeded "); - break; - - case API_LINK_TRUE: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2); - VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); - break; - - case API_LINK_FALSE: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6); - VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); - break; - - case API_MULTI_COLL: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ - VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50); - VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded"); - break; - - case API_MULTI_IND: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ - VRFY((status >= 0), "collective chunk optimization set chunk 
number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100); - VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded"); - break; - - default:; - } - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - if (facc_type == FACC_MPIO) { - switch (api_option) { - case API_LINK_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0), "testing property list inserted succeeded"); - break; - - case API_MULTI_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0), "testing property list inserted succeeded"); - break; - - case API_LINK_TRUE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0), "testing property list inserted succeeded"); - break; - - case API_LINK_FALSE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0), "testing property list inserted succeeded"); - break; - - case API_MULTI_COLL: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, - H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0), "testing property list inserted succeeded"); - break; - - case API_MULTI_IND: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = - H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, - &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0), "testing property list inserted succeeded"); - break; - - default:; - } - } -#endif - - /* write data collectively */ - status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((status >= 0), "dataset write succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - /* Only check chunk optimization mode if selection I/O is not being used - - * selection I/O bypasses this IO mode decision - it's effectively always - * multi chunk currently */ - if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ true) { - switch (api_option) { - case API_LINK_HARD: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); - VRFY((status >= 0), "testing property list get succeeded"); - VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded"); - break; - - case API_MULTI_HARD: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value); - VRFY((status >= 0), "testing property list get succeeded"); - VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); - break; - - case API_LINK_TRUE: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value); - VRFY((status >= 0), "testing property list get succeeded"); - VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded"); - break; - - case API_LINK_FALSE: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value); - VRFY((status >= 0), "testing property list get succeeded"); - VRFY((prop_value == 0), "API to set 
LINK IO transferring to multi-chunk IO succeeded"); - break; - - case API_MULTI_COLL: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value); - VRFY((status >= 0), "testing property list get succeeded"); - VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded"); - break; - - case API_MULTI_IND: - status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value); - VRFY((status >= 0), "testing property list get succeeded"); - VRFY((prop_value == 0), - "API to set MULTI-CHUNK IO transferring to independent IO succeeded"); - break; - - default:; - } - } -#endif - - status = H5Dclose(dataset); - VRFY((status >= 0), ""); - - status = H5Pclose(xfer_plist); - VRFY((status >= 0), "property list closed"); - - status = H5Sclose(file_dataspace); - VRFY((status >= 0), ""); - - status = H5Sclose(mem_dataspace); - VRFY((status >= 0), ""); - - status = H5Fclose(file); - VRFY((status >= 0), ""); - - if (data_array1) - free(data_array1); - - /* Use collective read to verify the correctness of collective write. */ - - /* allocate memory for data buffer */ - data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* allocate memory for data buffer */ - data_origin1 = (int *)malloc(dims[0] * dims[1] * sizeof(int)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - acc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((acc_plist >= 0), "MPIO creation property list succeeded"); - - file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist); - VRFY((file >= 0), "H5Fcreate succeeded"); - - status = H5Pclose(acc_plist); - VRFY((status >= 0), ""); - - /* open the collective dataset*/ - dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT); - VRFY((dataset >= 0), ""); - - /* set up dimensions of the slab this process accesses */ - ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); - - /* obtain the file and mem dataspace*/ - file_dataspace = H5Dget_space(dataset); - VRFY((file_dataspace >= 0), ""); - - if (ALL != mem_selection) { - mem_dataspace = H5Dget_space(dataset); - VRFY((mem_dataspace >= 0), ""); - } - else { - current_dims = num_points; - mem_dataspace = H5Screate_simple(1, ¤t_dims, NULL); - VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); - } - - switch (file_selection) { - case HYPER: - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(file_dataspace); - VRFY((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(file_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - switch (mem_selection) { - case HYPER: - status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0), "hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0), "Element selection succeeded"); - } - else { - status = H5Sselect_none(mem_dataspace); - VRFY((status >= 0), "none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(mem_dataspace); - 
VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - /* fill dataset with test data */ - ccdataset_fill(start, stride, count, block, data_origin1, mem_selection); - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - - status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((status >= 0), "MPIO collective transfer property succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((status >= 0), "set independent IO collectively succeeded"); - } - - status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((status >= 0), "dataset read succeeded"); - - /* verify the read data with original expected data */ - status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); - if (status) - nerrors++; - - status = H5Pclose(xfer_plist); - VRFY((status >= 0), "property list closed"); - - /* close dataset collectively */ - status = H5Dclose(dataset); - VRFY((status >= 0), "H5Dclose"); - - /* release all IDs created */ - status = H5Sclose(file_dataspace); - VRFY((status >= 0), "H5Sclose"); - - status = H5Sclose(mem_dataspace); - VRFY((status >= 0), "H5Sclose"); - - /* close the file collectively */ - status = H5Fclose(file); - VRFY((status >= 0), "H5Fclose"); - - /* release data buffers */ - if (coords) - free(coords); - if (data_array1) - free(data_array1); - if (data_origin1) - free(data_origin1); -} - -/* Set up the selection */ -static void -ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - int mode) -{ - - switch (mode) { - - case BYROW_CONT: - /* Each process takes a slabs of rows. */ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = SPACE_DIM1; - count[1] = SPACE_DIM2; - start[0] = (hsize_t)mpi_rank * count[0]; - start[1] = 0; - - break; - - case BYROW_DISCONT: - /* Each process takes several disjoint blocks. */ - block[0] = 1; - block[1] = 1; - stride[0] = 3; - stride[1] = 3; - count[0] = SPACE_DIM1 / (stride[0] * block[0]); - count[1] = (SPACE_DIM2) / (stride[1] * block[1]); - start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank; - start[1] = 0; - - break; - - case BYROW_SELECTNONE: - /* Each process takes a slabs of rows, there are - no selections for the last process. */ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1); - count[1] = SPACE_DIM2; - start[0] = (hsize_t)mpi_rank * count[0]; - start[1] = 0; - - break; - - case BYROW_SELECTUNBALANCE: - /* The first one-third of the number of processes only - select top half of the domain, The rest will select the bottom - half of the domain. */ - - block[0] = 1; - count[0] = 2; - stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1; - block[1] = SPACE_DIM2; - count[1] = 1; - start[1] = 0; - stride[1] = 1; - if ((mpi_rank * 3) < (mpi_size * 2)) - start[0] = (hsize_t)mpi_rank; - else - start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3)); - break; - - case BYROW_SELECTINCHUNK: - /* Each process will only select one chunk */ - - block[0] = 1; - count[0] = 1; - start[0] = (hsize_t)(mpi_rank * SPACE_DIM1); - stride[0] = 1; - block[1] = SPACE_DIM2; - count[1] = 1; - stride[1] = 1; - start[1] = 0; - - break; - - default: - /* Unknown mode. Set it to cover the whole dataset. 
*/ - block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size; - block[1] = SPACE_DIM2; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; - - break; - } - if (VERBOSE_MED) { - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " - "datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - } -} - -/* - * Fill the dataset with trivial data for testing. - * Assume dimension rank is 2. - */ -static void -ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset, - int mem_selection) -{ - DATATYPE *dataptr = dataset; - DATATYPE *tmptr; - hsize_t i, j, k1, k2, k = 0; - /* put some trivial data in the data_array */ - tmptr = dataptr; - - /* assign the disjoint block (two-dimensional)data array value - through the pointer */ - - for (k1 = 0; k1 < count[0]; k1++) { - for (i = 0; i < block[0]; i++) { - for (k2 = 0; k2 < count[1]; k2++) { - for (j = 0; j < block[1]; j++) { - - if (ALL != mem_selection) { - dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + - k2 * stride[1] + j); - } - else { - dataptr = tmptr + k; - k++; - } - - *dataptr = (DATATYPE)(k1 + k2 + i + j); - } - } - } - } -} - -/* - * Print the first block of the content of the dataset. - */ -static void -ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) - -{ - DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* print the column heading */ - printf("Print only the first block of the dataset\n"); - printf("%-8s", "Cols:"); - for (j = 0; j < block[1]; j++) { - printf("%3lu ", (unsigned long)(start[1] + j)); - } - printf("\n"); - - /* print the slab data */ - for (i = 0; i < block[0]; i++) { - printf("Row %2lu: ", (unsigned long)(i + start[0])); - for (j = 0; j < block[1]; j++) { - printf("%03d ", *dataptr++); - } - printf("\n"); - } -} - -/* - * Print the content of the dataset. 
- */ -static int -ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, - DATATYPE *original, int mem_selection) -{ - hsize_t i, j, k1, k2, k = 0; - int vrfyerrs; - DATATYPE *dataptr, *oriptr; - - /* print it if VERBOSE_MED */ - if (VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - ccdataset_print(start, block, original); - printf("compared values:\n"); - ccdataset_print(start, block, dataset); - } - - vrfyerrs = 0; - - for (k1 = 0; k1 < count[0]; k1++) { - for (i = 0; i < block[0]; i++) { - for (k2 = 0; k2 < count[1]; k2++) { - for (j = 0; j < block[1]; j++) { - if (ALL != mem_selection) { - dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + - k2 * stride[1] + j); - oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + - k2 * stride[1] + j); - } - else { - dataptr = dataset + k; - oriptr = original + k; - k++; - } - if (*dataptr != *oriptr) { - if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { - printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); - } - } - } - } - } - } - if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if (vrfyerrs) - printf("%d errors found in ccdataset_vrfy\n", vrfyerrs); - return (vrfyerrs); -} diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c deleted file mode 100644 index 353d5f67a9b..00000000000 --- a/testpar/API/t_coll_md_read.c +++ /dev/null @@ -1,624 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * A test suite to test HDF5's collective metadata read and write capabilities, - * as enabled by making a call to H5Pset_all_coll_metadata_ops() and/or - * H5Pset_coll_metadata_write(). - */ - -#include "hdf5.h" -#include "testphdf5.h" - -#include -#include -#include - -/* - * Define the non-participating process as the "last" - * rank to avoid any weirdness potentially caused by - * an if (mpi_rank == 0) check. 
- */ -#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1) -#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset" -#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2 -#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5 -#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5 - -#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2 - -#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000 -#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue" -#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1 - -#define COLL_GHEAP_WRITE_ATTR_NELEMS 10 -#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr" -#define COLL_GHEAP_WRITE_ATTR_DIMS 1 - -/* - * A test for issue HDFFV-10501. A parallel hang was reported which occurred - * in linked-chunk I/O when collective metadata reads are enabled and some ranks - * do not have any selection in a dataset's dataspace, while others do. The ranks - * which have no selection during the read/write operation called H5D__chunk_addrmap() - * to retrieve the lowest chunk address, since we require that the read/write be done - * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees, - * this caused the non-participating ranks to issue a collective MPI_Bcast() call - * which the other ranks did not issue, thus causing a hang. - * - * However, since these ranks are not actually reading/writing anything, this call - * can simply be removed and the address used for the read/write can be set to an - * arbitrary number (0 was chosen). - */ -void -test_partial_no_selection_coll_md_read(void) -{ - const char *filename; - hsize_t *dataset_dims = NULL; - hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS]; - hsize_t sel_dims[1]; - hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE, - PARTIAL_NO_SELECTION_X_DIM_SCALE}; - hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS]; - hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS]; - hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS]; - hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS]; - hid_t file_id = H5I_INVALID_HID; - hid_t fapl_id = H5I_INVALID_HID; - hid_t dset_id = H5I_INVALID_HID; - hid_t dcpl_id = H5I_INVALID_HID; - hid_t dxpl_id = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - int mpi_rank, mpi_size; - void *data = NULL; - void *read_buf = NULL; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or file flush aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* GetTestParameters() */; - - fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); - - /* - * Even though the testphdf5 framework currently sets collective metadata reads - * on the FAPL, we call it here just to be sure this is futureproof, since - * demonstrating this issue relies upon it. 
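As a reading aid, here is a minimal sketch of the file-access setup this test depends on: the MPI-IO driver plus collective metadata reads enabled on the FAPL. The test itself goes through the create_faccess_plist() helper; this sketch assumes a parallel HDF5 build, MPI_COMM_WORLD, and a placeholder file name "example.h5", with error checks omitted.

    #include <stdbool.h>
    #include <mpi.h>
    #include "hdf5.h"

    /* Sketch: create a file with the MPI-IO driver and collective
     * metadata reads enabled, the precondition for reproducing the
     * issue described above. */
    static hid_t
    open_with_coll_md_reads(void)
    {
        hid_t fapl_id, file_id;

        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
        H5Pset_all_coll_metadata_ops(fapl_id, true); /* collective metadata reads */

        file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
        H5Pclose(fapl_id);

        return file_id;
    }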
- */ - VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - dataset_dims = malloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims)); - VRFY((dataset_dims != NULL), "malloc succeeded"); - - dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size; - dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size; - max_dataset_dims[0] = H5S_UNLIMITED; - max_dataset_dims[1] = H5S_UNLIMITED; - - fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims); - VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - - /* - * Set up chunking on the dataset in order to reproduce the problem. - */ - dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); - - VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), - "H5Pset_chunk succeeded"); - - dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, - dcpl_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); - - /* - * Setup hyperslab selection to split the dataset among the ranks. - * - * The ranks will write rows across the dataset. - */ - start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank; - start[1] = 0; - stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; - stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; - count[0] = 1; - count[1] = (hsize_t)mpi_size; - block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; - block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; - - VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), - "H5Sselect_hyperslab succeeded"); - - sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE); - - mspace_id = H5Screate_simple(1, sel_dims, NULL); - VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); - - data = calloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * - sizeof(int)); - VRFY((data != NULL), "calloc succeeded"); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); - - /* - * Enable collective access for the data transfer. - */ - VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); - - VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); - - VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); - - /* - * Ensure that linked-chunk I/O is performed since this is - * the particular code path where the issue lies and we don't - * want the library doing multi-chunk I/O behind our backs. - */ - VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), - "H5Pset_dxpl_mpio_chunk_opt succeeded"); - - read_buf = malloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * - sizeof(int)); - VRFY((read_buf != NULL), "malloc succeeded"); - - /* - * Make sure to call H5Sselect_none() on the non-participating process. - */ - if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { - VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded"); - VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded"); - } - - /* - * Finally have each rank read their section of data back from the dataset. 
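A minimal sketch of the participation pattern the test exercises here: a rank with nothing to read still enters the collective H5Dread(), but with empty selections on both the file and memory dataspaces. The function read_my_piece is illustrative, and dset_id, fspace_id, mspace_id, dxpl_id (collective) and read_buf are assumed to be set up as in the surrounding test.

    /* Sketch: every rank calls H5Dread(); ranks with no data keep empty
     * selections on BOTH spaces.  Skipping the call on the empty rank is
     * what used to cause the hang described in HDFFV-10501. */
    static herr_t
    read_my_piece(hid_t dset_id, hid_t fspace_id, hid_t mspace_id, hid_t dxpl_id,
                  int have_selection, void *read_buf)
    {
        if (!have_selection) {
            H5Sselect_none(fspace_id);
            H5Sselect_none(mspace_id);
        }

        return H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf);
    }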
- */ - VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), - "H5Dread succeeded"); - - /* - * Check data integrity just to be sure. - */ - if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { - VRFY((!memcmp(data, read_buf, - count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * - sizeof(int))), - "memcmp succeeded"); - } - - if (dataset_dims) { - free(dataset_dims); - dataset_dims = NULL; - } - - if (data) { - free(data); - data = NULL; - } - - if (read_buf) { - free(read_buf); - read_buf = NULL; - } - - VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); - VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); - VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); - VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); -} - -/* - * A test for HDFFV-10562 which attempts to verify that using multi-chunk - * I/O with collective metadata reads enabled doesn't causes issues due to - * collective metadata reads being made only by process 0 in H5D__chunk_addrmap(). - * - * Failure in this test may either cause a hang, or, due to how the MPI calls - * pertaining to this issue might mistakenly match up, may cause an MPI error - * message similar to: - * - * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed - * major: Internal error (too specific to document in detail) - * minor: Some MPI function failed - * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack: - *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006) - *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................: - *MPIR_Bcast_intra(1249)............: - *MPIR_SMP_Bcast(1088)..............: - *MPIR_Bcast_binomial(239)..........: - *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer - *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String - * - */ -void -test_multi_chunk_io_addrmap_issue(void) -{ - const char *filename; - hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; - hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; - hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; - hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; - hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5}; - hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5}; - hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED}; - hid_t file_id = H5I_INVALID_HID; - hid_t fapl_id = H5I_INVALID_HID; - hid_t dset_id = H5I_INVALID_HID; - hid_t dcpl_id = H5I_INVALID_HID; - hid_t dxpl_id = H5I_INVALID_HID; - hid_t space_id = H5I_INVALID_HID; - void *read_buf = NULL; - int mpi_rank; - int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}}; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or file flush aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* 
GetTestParameters() */; - - fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); - - /* - * Even though the testphdf5 framework currently sets collective metadata reads - * on the FAPL, we call it here just to be sure this is futureproof, since - * demonstrating this issue relies upon it. - */ - VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims); - VRFY((space_id >= 0), "H5Screate_simple succeeded"); - - dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); - - VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0), - "H5Pset_chunk succeeded"); - - dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); - - VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); - VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0), - "H5Pset_dxpl_mpio_chunk_opt succeeded"); - - start[1] = 0; - stride[0] = stride[1] = 1; - count[0] = count[1] = 5; - block[0] = block[1] = 1; - - if (mpi_rank == 0) - start[0] = 0; - else - start[0] = 5; - - VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0), - "H5Sselect_hyperslab succeeded"); - if (mpi_rank != 0) - VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded"); - - VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); - - VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); - - read_buf = malloc(50 * sizeof(int)); - VRFY((read_buf != NULL), "malloc succeeded"); - - VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded"); - - if (read_buf) { - free(read_buf); - read_buf = NULL; - } - - VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded"); - VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); - VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); -} - -/* - * A test for HDFFV-10562 which attempts to verify that using linked-chunk - * I/O with collective metadata reads enabled doesn't cause issues due to - * collective metadata reads being made only by process 0 in H5D__sort_chunk(). 
- * - * Failure in this test may either cause a hang, or, due to how the MPI calls - * pertaining to this issue might mistakenly match up, may cause an MPI error - * message similar to: - * - * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed - * major: Internal error (too specific to document in detail) - * minor: Some MPI function failed - * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack: - *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed - *MPIR_Bcast_impl(1452)...: - *MPIR_Bcast(1476)........: - *MPIR_Bcast_intra(1249)..: - *MPIR_SMP_Bcast(1088)....: - *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received - *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String - */ -void -test_link_chunk_io_sort_chunk_issue(void) -{ - const char *filename; - hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; - hid_t file_id = H5I_INVALID_HID; - hid_t fapl_id = H5I_INVALID_HID; - hid_t dset_id = H5I_INVALID_HID; - hid_t dcpl_id = H5I_INVALID_HID; - hid_t dxpl_id = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - int mpi_rank, mpi_size; - void *data = NULL; - void *read_buf = NULL; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or file flush aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* GetTestParameters() */; - - fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); - - /* - * Even though the testphdf5 framework currently sets collective metadata reads - * on the FAPL, we call it here just to be sure this is futureproof, since - * demonstrating this issue relies upon it. - */ - VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - /* - * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM - * chunks, where every rank writes to a piece of every single chunk to keep utilization high. - */ - dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM; - - fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL); - VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - - /* - * Set up chunking on the dataset in order to reproduce the problem. 
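For orientation, a minimal sketch of the transfer-property-list settings these tests use to pin the library to a particular collective chunk I/O path. The helper name make_linked_chunk_dxpl is illustrative only; error checks are omitted.

    /* Sketch: collective transfer with linked-chunk I/O forced.  The
     * multi-chunk variant of the issue is reproduced by passing
     * H5FD_MPIO_CHUNK_MULTI_IO instead. */
    static hid_t
    make_linked_chunk_dxpl(void)
    {
        hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
        H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO);

        return dxpl_id;
    }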
- */ - dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); - - /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */ - chunk_dims[0] = (hsize_t)mpi_size; - - VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0), - "H5Pset_chunk succeeded"); - - dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id, - H5P_DEFAULT, dcpl_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); - - /* - * Setup hyperslab selection to split the dataset among the ranks. - */ - start[0] = (hsize_t)mpi_rank; - stride[0] = (hsize_t)mpi_size; - count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM; - block[0] = 1; - - VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), - "H5Sselect_hyperslab succeeded"); - - sel_dims[0] = count[0]; - - mspace_id = H5Screate_simple(1, sel_dims, NULL); - VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); - - data = calloc(1, count[0] * sizeof(int)); - VRFY((data != NULL), "calloc succeeded"); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); - - /* - * Enable collective access for the data transfer. - */ - VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); - - VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); - - VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); - - /* - * Ensure that linked-chunk I/O is performed since this is - * the particular code path where the issue lies and we don't - * want the library doing multi-chunk I/O behind our backs. - */ - VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), - "H5Pset_dxpl_mpio_chunk_opt succeeded"); - - read_buf = malloc(count[0] * sizeof(int)); - VRFY((read_buf != NULL), "malloc succeeded"); - - VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), - "H5Sselect_hyperslab succeeded"); - - sel_dims[0] = count[0]; - - VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); - - mspace_id = H5Screate_simple(1, sel_dims, NULL); - VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); - - /* - * Finally have each rank read their section of data back from the dataset. - */ - VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), - "H5Dread succeeded"); - - if (data) { - free(data); - data = NULL; - } - - if (read_buf) { - free(read_buf); - read_buf = NULL; - } - - VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); - VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); - VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); - VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); -} - -/* - * A test for GitHub issue #2433 which causes a collective metadata write - * of global heap data. This test is meant to ensure that global heap data - * gets correctly mapped as raw data during a collective metadata write - * using vector I/O. - * - * An assertion exists in the library that should be triggered if global - * heap data is not correctly mapped as raw data. 
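A minimal sketch of the operation the collective-global-heap-write test below is built around: writing a variable-length attribute, whose data is stored in the global heap. It assumes file_id comes from a file opened with H5Pset_coll_metadata_write(fapl_id, true); the attribute name "vl_attr" and the helper name write_vl_attr are placeholders, and error checks are omitted.

    /* Sketch: a variable-length attribute forces its data into the
     * global heap, which the collective metadata write must then map
     * correctly as raw data. */
    static void
    write_vl_attr(hid_t file_id)
    {
        int     data[10];
        hvl_t   vl;
        hsize_t dims[1] = {1};
        hid_t   vl_type, space_id, attr_id;

        for (size_t i = 0; i < 10; i++)
            data[i] = (int)i;
        vl.len = 10;
        vl.p   = data;

        vl_type  = H5Tvlen_create(H5T_NATIVE_INT);
        space_id = H5Screate_simple(1, dims, NULL);
        attr_id  = H5Acreate2(file_id, "vl_attr", vl_type, space_id, H5P_DEFAULT, H5P_DEFAULT);

        H5Awrite(attr_id, vl_type, &vl); /* VL data lands in the global heap */

        H5Aclose(attr_id);
        H5Sclose(space_id);
        H5Tclose(vl_type);
    }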
- */ -void -test_collective_global_heap_write(void) -{ - const char *filename; - hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS]; - hid_t file_id = H5I_INVALID_HID; - hid_t fapl_id = H5I_INVALID_HID; - hid_t attr_id = H5I_INVALID_HID; - hid_t vl_type = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hvl_t vl_data; - int mpi_rank, mpi_size; - int data_buf[COLL_GHEAP_WRITE_ATTR_NELEMS]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset or file flush aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* GetTestParameters() */; - - fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); - - /* - * Even though the testphdf5 framework currently sets collective metadata - * writes on the FAPL, we call it here just to be sure this is futureproof, - * since demonstrating this issue relies upon it. - */ - VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "Set collective metadata writes succeeded"); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - attr_dims[0] = 1; - - fspace_id = H5Screate_simple(COLL_GHEAP_WRITE_ATTR_DIMS, attr_dims, NULL); - VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - - vl_type = H5Tvlen_create(H5T_NATIVE_INT); - VRFY((vl_type >= 0), "H5Tvlen_create succeeded"); - - vl_data.len = COLL_GHEAP_WRITE_ATTR_NELEMS; - vl_data.p = data_buf; - - /* - * Create a variable-length attribute that will get written to the global heap - */ - attr_id = H5Acreate2(file_id, COLL_GHEAP_WRITE_ATTR_NAME, vl_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT); - VRFY((attr_id >= 0), "H5Acreate2 succeeded"); - - for (size_t i = 0; i < COLL_GHEAP_WRITE_ATTR_NELEMS; i++) - data_buf[i] = (int)i; - - VRFY((H5Awrite(attr_id, vl_type, &vl_data) >= 0), "H5Awrite succeeded"); - - VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); - VRFY((H5Tclose(vl_type) >= 0), "H5Sclose succeeded"); - VRFY((H5Aclose(attr_id) >= 0), "H5Aclose succeeded"); - VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); - VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); -} diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c deleted file mode 100644 index 0da25b06463..00000000000 --- a/testpar/API/t_dset.c +++ /dev/null @@ -1,4317 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Parallel tests for datasets - */ - -/* - * Example of using the parallel HDF5 library to access datasets. - * - * This program contains three major parts. 
Part 1 tests fixed dimension - * datasets, for both independent and collective transfer modes. - * Part 2 tests extendible datasets, for independent transfer mode - * only. - * Part 3 tests extendible datasets, for collective transfer mode - * only. - */ - -#include "hdf5.h" -#include "testphdf5.h" - -/* - * The following are various utility routines used by the tests. - */ - -/* - * Setup the dimensions of the hyperslab. - * Two modes--by rows or by columns. - * Assume dimension rank is 2. - * BYROW divide into slabs of rows - * BYCOL divide into blocks of columns - * ZROW same as BYROW except process 0 gets 0 rows - * ZCOL same as BYCOL except process 0 gets 0 columns - */ -static void -slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - int mode) -{ - switch (mode) { - case BYROW: - /* Each process takes a slabs of rows. */ - block[0] = (hsize_t)(dim0 / mpi_size); - block[1] = (hsize_t)dim1; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)mpi_rank * block[0]; - start[1] = 0; - if (VERBOSE_MED) - printf("slab_set BYROW\n"); - break; - case BYCOL: - /* Each process takes a block of columns. */ - block[0] = (hsize_t)dim0; - block[1] = (hsize_t)(dim1 / mpi_size); - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank * block[1]; - if (VERBOSE_MED) - printf("slab_set BYCOL\n"); - break; - case ZROW: - /* Similar to BYROW except process 0 gets 0 row */ - block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0); - block[1] = (hsize_t)dim1; - stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */ - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0); - start[1] = 0; - if (VERBOSE_MED) - printf("slab_set ZROW\n"); - break; - case ZCOL: - /* Similar to BYCOL except process 0 gets 0 column */ - block[0] = (hsize_t)dim0; - block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0); - stride[0] = block[0]; - stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */ - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0); - if (VERBOSE_MED) - printf("slab_set ZCOL\n"); - break; - default: - /* Unknown mode. Set it to cover the whole dataset. */ - printf("unknown slab_set mode (%d)\n", mode); - block[0] = (hsize_t)dim0; - block[1] = (hsize_t)dim1; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; - if (VERBOSE_MED) - printf("slab_set wholeset\n"); - break; - } - if (VERBOSE_MED) { - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " - "datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - } -} - -/* - * Setup the coordinates for point selection. 
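For reference, a minimal sketch of how the BYROW decomposition produced by slab_set() above is applied to a dataset's file dataspace: each rank selects a contiguous band of dim0/mpi_size rows. The helper name select_byrow is illustrative; dim0/dim1 are the global dataset extents used by these tests, and error checks are omitted.

    /* Sketch: BYROW hyperslab selection for one rank.  Caller closes the
     * returned dataspace with H5Sclose(). */
    static hid_t
    select_byrow(hid_t dset_id, int mpi_rank, int mpi_size, int dim0, int dim1)
    {
        hsize_t start[2], count[2], stride[2], block[2];
        hid_t   fspace_id = H5Dget_space(dset_id);

        block[0]  = (hsize_t)(dim0 / mpi_size); /* rows per rank */
        block[1]  = (hsize_t)dim1;              /* all columns   */
        stride[0] = block[0];
        stride[1] = block[1];
        count[0]  = count[1] = 1;               /* a single block */
        start[0]  = (hsize_t)mpi_rank * block[0];
        start[1]  = 0;

        H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block);

        return fspace_id;
    }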
- */ -void -point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - hsize_t coords[], int order) -{ - hsize_t i, j, k = 0, m, n, s1, s2; - - HDcompile_assert(RANK == 2); - - if (OUT_OF_ORDER == order) - k = (num_points * RANK) - 1; - else if (IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for (i = 0; i < count[0]; i++) - for (j = 0; j < count[1]; j++) - for (m = 0; m < block[0]; m++) - for (n = 0; n < block[1]; n++) - if (OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if (IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } - - if (VERBOSE_MED) { - printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " - "datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - k = 0; - for (i = 0; i < num_points; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); - k += 2; - } - } -} - -/* - * Fill the dataset with trivial data for testing. - * Assume dimension rank is 2 and data is stored contiguous. - */ -static void -dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset) -{ - DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* put some trivial data in the data_array */ - for (i = 0; i < block[0]; i++) { - for (j = 0; j < block[1]; j++) { - *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1)); - dataptr++; - } - } -} - -/* - * Print the content of the dataset. - */ -static void -dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) -{ - DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* print the column heading */ - printf("%-8s", "Cols:"); - for (j = 0; j < block[1]; j++) { - printf("%3lu ", (unsigned long)(start[1] + j)); - } - printf("\n"); - - /* print the slab data */ - for (i = 0; i < block[0]; i++) { - printf("Row %2lu: ", (unsigned long)(i + start[0])); - for (j = 0; j < block[1]; j++) { - printf("%03d ", *dataptr++); - } - printf("\n"); - } -} - -/* - * Print the content of the dataset. 
- */ -int -dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, - DATATYPE *original) -{ - hsize_t i, j; - int vrfyerrs; - - /* print it if VERBOSE_MED */ - if (VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], - (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], - (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - dataset_print(start, block, original); - printf("compared values:\n"); - dataset_print(start, block, dataset); - } - - vrfyerrs = 0; - for (i = 0; i < block[0]; i++) { - for (j = 0; j < block[1]; j++) { - if (*dataset != *original) { - if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { - printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]), - (unsigned long)(j + start[1]), *(original), *(dataset)); - } - dataset++; - original++; - } - } - } - if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if (vrfyerrs) - printf("%d errors found in dataset_vrfy\n", vrfyerrs); - return (vrfyerrs); -} - -/* - * Part 1.a--Independent read/write for fixed dimension datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 files with parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. - */ - -void -dataset_writeInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - const char *filename; - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* ---------------------------------------- - * CREATE AN HDF5 FILE WITH PARALLEL ACCESS - * ---------------------------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively 
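A minimal sketch of what the create_faccess_plist() helper used below presumably boils down to for the default MPI-IO case: a file access property list that selects the MPI-IO driver for the given communicator. The real helper lives elsewhere in the test harness and also handles other facc_type values; the name mpio_fapl is illustrative and error checks are omitted.

    /* Sketch: plain MPI-IO file access property list. */
    static hid_t
    mpio_fapl(MPI_Comm comm, MPI_Info info)
    {
        hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);

        H5Pset_fapl_mpio(fapl_id, comm, info);

        return fapl_id;
    }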
*/ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* --------------------------------------------- - * Define the dimensions of the overall datasets - * and the slabs local to the MPI process. - * ------------------------------------------- */ - /* setup dimensionality object */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create a dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* - * To test the independent orders of writes between processes, all - * even number processes write to dataset1 first, then dataset2. - * All odd number processes write to dataset2 first, then dataset1. - */ - - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - - /* setup dimensions again to write with zero rows for process 0 */ - if (VERBOSE_MED) - printf("writeInd by some with zero row\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if (MAINPROCESS) { - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("writeInd by some with zero row"); - if ((mpi_rank / 2) * 2 != mpi_rank) { - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); - } -#ifdef BARRIER_CHECKS - MPI_Barrier(MPI_COMM_WORLD); -#endif /* BARRIER_CHECKS */ - - /* release dataspace ID */ - H5Sclose(file_dataspace); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* release all IDs created */ - H5Sclose(sid); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - 
free(data_array1); -} - -/* Example of using the parallel HDF5 library to read a dataset */ -void -dataset_readInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, 
data_array1, data_origin1); - if (ret) - nerrors++; - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - /* release all IDs created */ - H5Sclose(file_dataspace); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); - if (data_origin1) - free(data_origin1); -} - -/* - * Part 1.b--Collective read/write for fixed dimension datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and - * each process controls a hyperslab within.] - */ - -void -dataset_writeAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */ - hid_t dataset5, dataset6, dataset7; /* Dataset ID */ - hid_t datatype; /* Datatype ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - const char *filename; - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - hsize_t current_dims; /* for point selection */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Collective write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* set up the coords array selection */ - num_points = (size_t)dim1; - coords = (hsize_t *)malloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t)); - VRFY((coords != NULL), "coords malloc succeeded"); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------- - * Define the dimensions of the overall datasets - * and create the dataset - * 
------------------------- */ - /* setup 2-D dimensionality object */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create a dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another dataset collectively */ - datatype = H5Tcopy(H5T_NATIVE_INT); - ret = H5Tset_order(datatype, H5T_ORDER_LE); - VRFY((ret >= 0), "H5Tset_order succeeded"); - - dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded"); - - /* create a third dataset collectively */ - dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset3 >= 0), "H5Dcreate2 succeeded"); - - dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset5 >= 0), "H5Dcreate2 succeeded"); - dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset6 >= 0), "H5Dcreate2 succeeded"); - dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset7 >= 0), "H5Dcreate2 succeeded"); - - /* release 2-D space ID created */ - H5Sclose(sid); - - /* setup scalar dimensionality object */ - sid = H5Screate(H5S_SCALAR); - VRFY((sid >= 0), "H5Screate succeeded"); - - /* create a fourth dataset collectively */ - dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset4 >= 0), "H5Dcreate2 succeeded"); - - /* release scalar space ID created */ - H5Sclose(sid); - - /* - * Set up dimensions of the slab this process accesses. - */ - - /* Dataset1: each process takes a block of rows. 
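For reference, a minimal sketch of the transfer property list set up before each collective write below. The independent_io flag mirrors the test harness's dxfer_coll_type == DXFER_INDEPENDENT_IO switch, which keeps the collective call but performs the actual I/O independently; the helper name make_xfer_plist is illustrative and error checks are omitted.

    /* Sketch: collective dataset transfer property list, optionally
     * downgraded to independent I/O inside the collective call. */
    static hid_t
    make_xfer_plist(int independent_io)
    {
        hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

        if (independent_io)
            H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);

        return xfer_plist;
    }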
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - MESG("writeAll by Row"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - - /* setup dimensions again to writeAll with zero rows for process 0 */ - if (VERBOSE_MED) - printf("writeAll by some with zero row\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if (MAINPROCESS) { - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("writeAll by some with zero row"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); - - /* release all temporary handles. */ - /* Could have used them for dataset2 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset2: each process takes a block of columns. 
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - - /* setup dimensions again to writeAll with zero columns for process 0 */ - if (VERBOSE_MED) - printf("writeAll by some with zero col\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if (MAINPROCESS) { - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("writeAll by some with zero col"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded"); - - /* release all temporary handles. */ - /* Could have used them for dataset3 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. 
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset3); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - if (MAINPROCESS) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded"); - } /* end if */ - else { - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); - } /* end else */ - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - if (MAINPROCESS) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded"); - } /* end if */ - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } /* end if */ - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - MESG("writeAll with none"); - ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); - - /* write data collectively (with datatype conversion) */ - MESG("writeAll with none"); - ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); - - /* release all temporary handles. */ - /* Could have used them for dataset4 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset4: each process writes no data, except process zero uses "all" selection. 
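A minimal sketch of the mixed-selection pattern used for datasets 3 and 4: one rank passes empty selections while the others select their piece, and every rank still issues the same collective H5Dwrite(). The function name is illustrative; dset_id, fspace_id, mspace_id, xfer_plist, buf and the selection parameters are assumed to be set up as in the surrounding code, with error checks omitted.

    /* Sketch: rank 0 contributes nothing but still joins the collective
     * write; the other ranks write their hyperslab. */
    static herr_t
    write_with_mixed_selection(hid_t dset_id, hid_t fspace_id, hid_t mspace_id,
                               hid_t xfer_plist, int mpi_rank, const int *buf,
                               const hsize_t start[], const hsize_t stride[],
                               const hsize_t count[], const hsize_t block[])
    {
        if (mpi_rank == 0) {
            H5Sselect_none(fspace_id);
            H5Sselect_none(mspace_id);
        }
        else
            H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block);

        return H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, xfer_plist, buf);
    }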
*/ - /* Additionally, these are in a scalar dataspace */ - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset4); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - if (MAINPROCESS) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded"); - } /* end if */ - else { - ret = H5Sselect_all(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } /* end else */ - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate(H5S_SCALAR); - VRFY((mem_dataspace >= 0), ""); - if (MAINPROCESS) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded"); - } /* end if */ - else { - ret = H5Sselect_all(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } /* end else */ - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } /* end if */ - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - MESG("writeAll with scalar dataspace"); - ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); - - /* write data collectively (with datatype conversion) */ - MESG("writeAll with scalar dataspace"); - ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - if (data_array1) - free(data_array1); - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - block[0] = 1; - block[1] = (hsize_t)dim1; - stride[0] = 1; - stride[1] = (hsize_t)dim1; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); - start[1] = 0; - - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* Dataset5: point selection in File - Hyperslab selection in Memory*/ - /* create a file dataspace independently */ - point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); - file_dataspace = H5Dget_space(dataset5); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - start[0] = 0; - start[1] = 0; - mem_dataspace = H5Dget_space(dataset5); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset5 succeeded"); - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset6: point selection in File - Point selection in Memory*/ - /* create a file dataspace independently */ - start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); - start[1] = 0; - point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); - file_dataspace = H5Dget_space(dataset6); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - start[0] = 0; - start[1] = 0; - point_set(start, count, stride, block, num_points, coords, IN_ORDER); - mem_dataspace = H5Dget_space(dataset6); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset6 succeeded"); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset7: point selection in File - All selection in Memory*/ - /* create a file dataspace independently */ - start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); - start[1] = 0; - point_set(start, count, stride, block, num_points, coords, IN_ORDER); - file_dataspace = H5Dget_space(dataset7); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - current_dims = num_points; - mem_dataspace = H5Screate_simple(1, ¤t_dims, NULL); - VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); - - ret = H5Sselect_all(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_all succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset7 succeeded"); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* - * All writes completed. 
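The point-selection datasets above all follow the same recipe: build an explicit (row, col) coordinate list and hand it to H5Sselect_elements. A rough standalone sketch, assuming a 2-D integer dataset of rows x cols is open as dset and dxpl is a transfer property list:

#include <stdlib.h>
#include "hdf5.h"

/* Sketch: read every element of a 2-D dataset through a point selection
 * in the file, into a flat 1-D memory buffer. */
static herr_t
read_by_points(hid_t dset, hid_t dxpl, hsize_t rows, hsize_t cols, int *buf)
{
    size_t   npoints = (size_t)(rows * cols);
    hsize_t  nelem   = (hsize_t)npoints;
    hsize_t *coords  = malloc(npoints * 2 * sizeof(hsize_t)); /* (row, col) pairs */
    size_t   k       = 0;
    herr_t   status;

    if (!coords)
        return -1;

    for (hsize_t i = 0; i < rows; i++)
        for (hsize_t j = 0; j < cols; j++) {
            coords[k++] = i;
            coords[k++] = j;
        }

    hid_t file_space = H5Dget_space(dset);
    status           = H5Sselect_elements(file_space, H5S_SELECT_SET, npoints, coords);

    /* A simple 1-D memory space matching the number of selected points */
    hid_t mem_space = H5Screate_simple(1, &nelem, NULL);

    if (status >= 0)
        status = H5Dread(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

    H5Sclose(mem_space);
    H5Sclose(file_space);
    free(coords);
    return status;
}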
Close datasets collectively - */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - ret = H5Dclose(dataset3); - VRFY((ret >= 0), "H5Dclose3 succeeded"); - ret = H5Dclose(dataset4); - VRFY((ret >= 0), "H5Dclose4 succeeded"); - ret = H5Dclose(dataset5); - VRFY((ret >= 0), "H5Dclose5 succeeded"); - ret = H5Dclose(dataset6); - VRFY((ret >= 0), "H5Dclose6 succeeded"); - ret = H5Dclose(dataset7); - VRFY((ret >= 0), "H5Dclose7 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (coords) - free(coords); - if (data_array1) - free(data_array1); -} - -/* - * Example of using the parallel HDF5 library to read two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and - * each process controls a hyperslab within.] - */ - -void -dataset_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - int i, j, k; - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Collective read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* set up the coords array selection */ - num_points = (size_t)dim1; - coords = (hsize_t *)malloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t)); - VRFY((coords != NULL), "coords malloc succeeded"); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), "H5Fopen succeeded"); - - /* Release 
file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------- - * Open the datasets in it - * ------------------------- */ - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded"); - - /* open another dataset collectively */ - dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT); - VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded"); - dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT); - VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded"); - dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT); - VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded"); - - /* - * Set up dimensions of the slab this process accesses. - */ - - /* Dataset1: each process takes a block of columns. */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_origin1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset1 succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* setup dimensions again to readAll with zero columns for process 0 */ - if (VERBOSE_MED) - printf("readAll by some with zero col\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if (MAINPROCESS) { - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("readAll by some with zero col"); - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* release all temporary handles. 
*/ - /* Could have used them for dataset2 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset2: each process takes a block of rows. */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_origin1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset2 succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* setup dimensions again to readAll with zero rows for process 0 */ - if (VERBOSE_MED) - printf("readAll by some with zero row\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if (MAINPROCESS) { - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("readAll by some with zero row"); - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - if (data_array1) - free(data_array1); - if (data_origin1) - free(data_origin1); - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - block[0] = 1; - block[1] = (hsize_t)dim1; - stride[0] = 1; - stride[1] = (hsize_t)dim1; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); - start[1] = 0; - - dataset_fill(start, block, data_origin1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_origin1); - } - - /* Dataset5: point selection in memory - Hyperslab selection in file*/ - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset5); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - start[0] = 0; - start[1] = 0; - point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); - mem_dataspace = H5Dget_space(dataset5); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset5 succeeded"); - - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - if (data_array1) - free(data_array1); - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* Dataset6: point selection in File - Point selection in Memory*/ - /* create a file dataspace independently */ - start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); - start[1] = 0; - point_set(start, count, stride, block, num_points, coords, IN_ORDER); - file_dataspace = H5Dget_space(dataset6); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - start[0] = 0; - start[1] = 0; - point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); - mem_dataspace = H5Dget_space(dataset6); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset6 succeeded"); - - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if (ret) - nerrors++; - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - if (data_array1) - free(data_array1); - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* Dataset7: point selection in memory - All selection in file*/ - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset7); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_all(file_dataspace); - VRFY((ret >= 0), "H5Sselect_all succeeded"); - - num_points = (size_t)(dim0 * dim1); - k = 0; - for (i = 0; i < dim0; i++) { - for (j = 0; j < dim1; j++) { - coords[k++] = (hsize_t)i; - coords[k++] = (hsize_t)j; - } - } - mem_dataspace = H5Dget_space(dataset7); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset7 succeeded"); - - start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); - start[1] = 0; - ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank), - data_origin1); - if (ret) - nerrors++; - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* - * All reads completed. Close datasets collectively - */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - ret = H5Dclose(dataset5); - VRFY((ret >= 0), "H5Dclose5 succeeded"); - ret = H5Dclose(dataset6); - VRFY((ret >= 0), "H5Dclose6 succeeded"); - ret = H5Dclose(dataset7); - VRFY((ret >= 0), "H5Dclose7 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (coords) - free(coords); - if (data_array1) - free(data_array1); - if (data_origin1) - free(data_origin1); -} - -/* - * Part 2--Independent read/write for extendible datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two extendible - * datasets in one HDF5 file with independent parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. - */ - -void -extend_writeInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[RANK]; /* dataset dim sizes */ - hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - hsize_t chunk_dims[RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. 
list */ - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK]; /* for hyperslab setting */ - hsize_t stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = (hsize_t)chunkdim0; - chunk_dims[1] = (hsize_t)chunkdim1; - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* Reduce the number of metadata cache slots, so that there are cache - * collisions during the raw data I/O on the chunked dataset. This stresses - * the metadata cache and tests for cache bugs. -QAK - */ - { - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - - ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); - VRFY((ret >= 0), "H5Pget_cache succeeded"); - mdc_nelmts = 4; - ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); - VRFY((ret >= 0), "H5Pset_cache succeeded"); - } - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if (VERBOSE_MED) - printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - /* start out with no rows, extend it later. 
*/ - dims[0] = dims[1] = 0; - sid = H5Screate_simple(RANK, dims, max_dims); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - /* ------------------------- - * Test writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Extend its current dim sizes before writing */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - - /* ------------------------- - * Test writing to dataset2 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Try write to dataset2 beyond its current dim sizes. Should fail. */ - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently. Should fail. */ - H5E_BEGIN_TRY - { - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - } - H5E_END_TRY - VRFY((ret < 0), "H5Dwrite failed as expected"); - - H5Sclose(file_dataspace); - - /* Extend dataset2 and try again. Should succeed. 
*/ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset2, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); -} - -/* - * Example of using the parallel HDF5 library to create an extendable dataset - * and perform I/O on it in a way that verifies that the chunk cache is - * bypassed for parallel I/O. - */ - -void -extend_writeInd2(void) -{ - const char *filename; - hid_t fid; /* HDF5 file ID */ - hid_t fapl; /* File access templates */ - hid_t fs; /* File dataspace ID */ - hid_t ms; /* Memory dataspace ID */ - hid_t dataset; /* Dataset ID */ - hsize_t orig_size = 10; /* Original dataset dim size */ - hsize_t new_size = 20; /* Extended dataset dim size */ - hsize_t one = 1; - hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ - hsize_t chunk_size = 16384; /* chunk size */ - hid_t dcpl; /* dataset create prop. list */ - int written[10], /* Data to write */ - retrieved[10]; /* Data read in */ - int mpi_size, mpi_rank; /* MPI settings */ - int i; /* Local index variable */ - herr_t ret; /* Generic return value */ - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend independent write test #2 on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl >= 0), "create_faccess_plist succeeded"); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. 
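The extend tests above depend on chunked storage plus unlimited maximum dimensions: a dataset starts with zero rows and is grown with H5Dset_extent before any write can land beyond the current extent. A condensed sketch of that creation/extension sequence, assuming fid is an open, writable file:

#include "hdf5.h"

/* Sketch: create an extendible 2-D dataset (zero initial extent, unlimited
 * maximum dimensions) and grow it to new_dims.  Extendible datasets must
 * use chunked storage. */
static hid_t
create_and_extend(hid_t fid, const char *name, const hsize_t new_dims[2],
                  const hsize_t chunk_dims[2])
{
    hsize_t dims[2]     = {0, 0};
    hsize_t max_dims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk_dims);

    hid_t sid  = H5Screate_simple(2, dims, max_dims);
    hid_t dset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* Writes beyond the current extent fail until the dataset is extended */
    H5Dset_extent(dset, new_dims);

    H5Sclose(sid);
    H5Pclose(dcpl);
    return dset; /* caller selects a hyperslab on H5Dget_space(dset) and writes */
}

Note that a file dataspace handle obtained before the H5Dset_extent call still reflects the old extent; as the test code above does, close it and call H5Dget_space again after extending.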
- * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dcpl, 1, &chunk_size); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - fs = H5Screate_simple(1, &orig_size, &max_size); - VRFY((fs >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreat2e succeeded"); - - /* release resource */ - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* ------------------------- - * Test writing to dataset - * -------------------------*/ - /* create a memory dataspace independently */ - ms = H5Screate_simple(1, &orig_size, &max_size); - VRFY((ms >= 0), "H5Screate_simple succeeded"); - - /* put some trivial data in the data_array */ - for (i = 0; i < (int)orig_size; i++) - written[i] = i; - MESG("data array initialized"); - if (VERBOSE_MED) { - MESG("writing at offset zero: "); - for (i = 0; i < (int)orig_size; i++) - printf("%s%d", i ? ", " : "", written[i]); - printf("\n"); - } - ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* ------------------------- - * Read initial data from dataset. - * -------------------------*/ - ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); - VRFY((ret >= 0), "H5Dread succeeded"); - for (i = 0; i < (int)orig_size; i++) - if (written[i] != retrieved[i]) { - printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i, - written[i], i, retrieved[i]); - nerrors++; - } - if (VERBOSE_MED) { - MESG("read at offset zero: "); - for (i = 0; i < (int)orig_size; i++) - printf("%s%d", i ? ", " : "", retrieved[i]); - printf("\n"); - } - - /* ------------------------- - * Extend the dataset & retrieve new dataspace - * -------------------------*/ - ret = H5Dset_extent(dataset, &new_size); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - ret = H5Sclose(fs); - VRFY((ret >= 0), "H5Sclose succeeded"); - fs = H5Dget_space(dataset); - VRFY((fs >= 0), "H5Dget_space succeeded"); - - /* ------------------------- - * Write to the second half of the dataset - * -------------------------*/ - for (i = 0; i < (int)orig_size; i++) - written[i] = (int)orig_size + i; - MESG("data array re-initialized"); - if (VERBOSE_MED) { - MESG("writing at offset 10: "); - for (i = 0; i < (int)orig_size; i++) - printf("%s%d", i ? ", " : "", written[i]); - printf("\n"); - } - ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size); - VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); - ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* ------------------------- - * Read the new data - * -------------------------*/ - ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); - VRFY((ret >= 0), "H5Dread succeeded"); - for (i = 0; i < (int)orig_size; i++) - if (written[i] != retrieved[i]) { - printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i, - written[i], i, retrieved[i]); - nerrors++; - } - if (VERBOSE_MED) { - MESG("read at offset 10: "); - for (i = 0; i < (int)orig_size; i++) - printf("%s%d", i ? 
", " : "", retrieved[i]); - printf("\n"); - } - - /* Close dataset collectively */ - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - - /* Close the file collectively */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); -} - -/* Example of using the parallel HDF5 library to read an extendible dataset */ -void -extend_readInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_array2 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array2 != NULL), "data_array2 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* Try extend dataset1 which is open RDONLY. Should fail. 
*/ - - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); - VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); - dims[0]++; - H5E_BEGIN_TRY - { - ret = H5Dset_extent(dataset1, dims); - } - H5E_END_TRY - VRFY((ret < 0), "H5Dset_extent failed as expected"); - - H5Sclose(file_dataspace); - - /* Read dataset1 using BYROW pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset1 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - - /* Read dataset2 using BYCOL pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset2 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); - if (data_array2) - free(data_array2); - if (data_origin1) - free(data_origin1); -} - -/* - * Part 3--Collective read/write for extendible datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two extendible - * datasets in one HDF5 file with collective parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. 
- */ - -void -extend_writeAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[RANK]; /* dataset dim sizes */ - hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - hsize_t chunk_dims[RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. list */ - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK]; /* for hyperslab setting */ - hsize_t stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = (hsize_t)chunkdim0; - chunk_dims[1] = (hsize_t)chunkdim1; - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* Reduce the number of metadata cache slots, so that there are cache - * collisions during the raw data I/O on the chunked dataset. This stresses - * the metadata cache and tests for cache bugs. -QAK - */ - { - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - - ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); - VRFY((ret >= 0), "H5Pget_cache succeeded"); - mdc_nelmts = 4; - ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); - VRFY((ret >= 0), "H5Pset_cache succeeded"); - } - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. 
- * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if (VERBOSE_MED) - printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - /* start out with no rows, extend it later. */ - dims[0] = dims[1] = 0; - sid = H5Screate_simple(RANK, dims, max_dims); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - /* ------------------------- - * Test writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Extend its current dim sizes before writing */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* ------------------------- - * Test writing to dataset2 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* set up the 
collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Try write to dataset2 beyond its current dim sizes. Should fail. */ - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently. Should fail. */ - H5E_BEGIN_TRY - { - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - } - H5E_END_TRY - VRFY((ret < 0), "H5Dwrite failed as expected"); - - H5Sclose(file_dataspace); - - /* Extend dataset2 and try again. Should succeed. */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - ret = H5Dset_extent(dataset2, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); -} - -/* Example of using the parallel HDF5 library to read an extendible dataset */ -void -extend_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ 
- if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_array2 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_array2 != NULL), "data_array2 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* Try extend dataset1 which is open RDONLY. Should fail. */ - - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); - VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); - dims[0]++; - H5E_BEGIN_TRY - { - ret = H5Dset_extent(dataset1, dims); - } - H5E_END_TRY - VRFY((ret < 0), "H5Dset_extent failed as expected"); - - H5Sclose(file_dataspace); - - /* Read dataset1 using BYROW pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset1 read verified correct"); - if (ret) - nerrors++; - - 
H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - H5Pclose(xfer_plist); - - /* Read dataset2 using BYCOL pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset2 read verified correct"); - if (ret) - nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - H5Pclose(xfer_plist); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_array1) - free(data_array1); - if (data_array2) - free(data_array2); - if (data_origin1) - free(data_origin1); -} - -#ifdef H5_HAVE_FILTER_DEFLATE -/* - * Example of using the parallel HDF5 library to read a compressed - * dataset in an HDF5 file with collective parallel access support. 
- */ -void -compress_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t dcpl; /* Dataset creation property list */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t dataspace; /* Dataspace ID */ - hid_t dataset; /* Dataset ID */ - int rank = 1; /* Dataspace rank */ - hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */ - unsigned u; /* Local index variable */ - unsigned chunk_opts; /* Chunk options */ - unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ - DATATYPE *data_read = NULL; /* data buffer */ - DATATYPE *data_orig = NULL; /* expected data buffer */ - const char *filename; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - int mpi_size, mpi_rank; - herr_t ret; /* Generic return value */ - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Collective chunked dataset read test on file %s\n", filename); - - /* Retrieve MPI parameters */ - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - /* Allocate data buffer */ - data_orig = (DATATYPE *)malloc((size_t)dim * sizeof(DATATYPE)); - VRFY((data_orig != NULL), "data_origin1 malloc succeeded"); - data_read = (DATATYPE *)malloc((size_t)dim * sizeof(DATATYPE)); - VRFY((data_read != NULL), "data_array1 malloc succeeded"); - - /* Initialize data buffers */ - for (u = 0; u < dim; u++) - data_orig[u] = (DATATYPE)u; - - /* Run test both with and without filters disabled on partial chunks */ - for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; - disable_partial_chunk_filters++) { - /* Process zero creates the file with a compressed, chunked dataset */ - if (mpi_rank == 0) { - hsize_t chunk_dim; /* Chunk dimensions */ - - /* Create the file */ - fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY((fid > 0), "H5Fcreate succeeded"); - - /* Create property list for chunking and compression */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl > 0), "H5Pcreate succeeded"); - - ret = H5Pset_layout(dcpl, H5D_CHUNKED); - VRFY((ret >= 0), "H5Pset_layout succeeded"); - - /* Use eight chunks */ - chunk_dim = dim / 8; - ret = H5Pset_chunk(dcpl, rank, &chunk_dim); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* Set chunk options appropriately */ - if (disable_partial_chunk_filters) { - ret = H5Pget_chunk_opts(dcpl, &chunk_opts); - VRFY((ret >= 0), "H5Pget_chunk_opts succeeded"); - - chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; - - ret = H5Pset_chunk_opts(dcpl, chunk_opts); - VRFY((ret >= 0), "H5Pset_chunk_opts succeeded"); - } /* end if */ - - ret = H5Pset_deflate(dcpl, 9); - VRFY((ret >= 0), "H5Pset_deflate succeeded"); - - /* Create dataspace */ - dataspace = H5Screate_simple(rank, &dim, NULL); - VRFY((dataspace > 0), "H5Screate_simple succeeded"); - - /* Create dataset */ - dataset = - H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset > 0), "H5Dcreate2 succeeded"); - - /* Write compressed data */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig); - VRFY((ret >= 0), 
"H5Dwrite succeeded"); - - /* Close objects */ - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Sclose(dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - /* Wait for file to be created */ - MPI_Barrier(comm); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl); - VRFY((fid > 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* Open dataset with compressed chunks */ - dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT); - VRFY((dataset > 0), "H5Dopen2 succeeded"); - - /* Try reading & writing data */ - if (dataset > 0) { - /* Create dataset transfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist > 0), "H5Pcreate succeeded"); - - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Try reading the data */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* Verify data read */ - for (u = 0; u < dim; u++) - if (data_orig[u] != data_read[u]) { - printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__, - (unsigned)u, data_orig[u], (unsigned)u, data_read[u]); - nerrors++; - } - -#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); - VRFY((ret >= 0), "H5Dwrite succeeded"); -#endif - - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - } /* end if */ - - /* Close file */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } /* end for */ - - /* release data buffers */ - if (data_read) - free(data_read); - if (data_orig) - free(data_orig); -} -#endif /* H5_HAVE_FILTER_DEFLATE */ - -/* - * Part 4--Non-selection for chunked dataset - */ - -/* - * Example of using the parallel HDF5 library to create chunked - * dataset in one HDF5 file with collective and independent parallel - * MPIO access support. The Datasets are of sizes dim0 x dim1. - * Each process controls only a slab of size dim0 x dim1 within the - * dataset with the exception that one processor selects no element. - */ - -void -none_selection_chunk(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_origin = NULL; /* data buffer */ - DATATYPE *data_array = NULL; /* data buffer */ - hsize_t chunk_dims[RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. 
list */ - - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK]; /* for hyperslab setting */ - hsize_t stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ - hsize_t mstart[RANK]; /* for data buffer in memory */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - filename = PARATESTFILE /* GetTestParameters() */; - if (VERBOSE_MED) - printf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = (hsize_t)chunkdim0; - chunk_dims[1] = (hsize_t)chunkdim1; - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if (VERBOSE_MED) - printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - /* ------------------------- - * Test collective writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* allocate memory for data buffer. Only allocate enough buffer for - * each processor's data. 
*/ - if (mpi_rank) { - data_origin = (DATATYPE *)malloc(block[0] * block[1] * sizeof(DATATYPE)); - VRFY((data_origin != NULL), "data_origin malloc succeeded"); - - data_array = (DATATYPE *)malloc(block[0] * block[1] * sizeof(DATATYPE)); - VRFY((data_array != NULL), "data_array malloc succeeded"); - - /* put some trivial data in the data_array */ - mstart[0] = mstart[1] = 0; - dataset_fill(mstart, block, data_origin); - MESG("data_array initialized"); - if (VERBOSE_MED) { - MESG("data_array created"); - dataset_print(mstart, block, data_origin); - } - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Process 0 has no selection */ - if (!mpi_rank) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space(dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Process 0 has no selection */ - if (!mpi_rank) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - if (mpi_rank) { - ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); - if (ret) - nerrors++; - } - - /* ------------------------- - * Test independent writing to dataset2 - * -------------------------*/ - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* write data collectively */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - if (mpi_rank) { - ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); - if (ret) - nerrors++; - } - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if (data_origin) - free(data_origin); - if (data_array) - free(data_array); -} - -/* Function: test_actual_io_mode - * - * Purpose: tests one specific case of collective I/O and checks that the - * actual_chunk_opt_mode property and the 
actual_io_mode - * properties in the DXPL have the correct values. - * - * Input: selection_mode: changes the way processes select data from the space, as well - * as some dxpl flags to get collective I/O to break in different ways. - * - * The relevant I/O function and expected response for each mode: - * TEST_ACTUAL_IO_MULTI_CHUNK_IND: - * H5D_mpi_chunk_collective_io, each process reports independent I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_COL: - * H5D_mpi_chunk_collective_io, each process reports collective I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_MIX: - * H5D_mpi_chunk_collective_io, each process reports mixed I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: - * H5D_mpi_chunk_collective_io, processes disagree. The root reports - * collective, the rest report independent I/O - * - * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND. - * Set directly go to multi-chunk-io without num threshold calc. - * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_COL. - * Set directly go to multi-chunk-io without num threshold calc. - * - * TEST_ACTUAL_IO_LINK_CHUNK: - * H5D_link_chunk_collective_io, processes report linked chunk I/O - * - * TEST_ACTUAL_IO_CONTIGUOUS: - * H5D__contig_collective_write or H5D__contig_collective_read - * each process reports contiguous collective I/O - * - * TEST_ACTUAL_IO_NO_COLLECTIVE: - * Simple independent I/O. This tests that the defaults are properly set. - * - * TEST_ACTUAL_IO_RESET: - * Performs collective and then independent I/O with hthe same dxpl to - * make sure the property is correctly reset to the default on each use. - * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE - * (The most complex case that works on all builds) and then performs - * an independent read and write with the same dxpls. - * - * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE - * is not needed as they are covered by DIRECT_CHUNK_MIX and - * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing - * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold. 
- */ -static void -test_actual_io_mode(int selection_mode) -{ - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE; - H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE; - H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - const char *filename; - const char *test_name; - bool direct_multi_chunk_io; - bool multi_chunk_io; - bool is_chunked; - bool is_collective; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int *buffer; - int i; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl = -1; - hid_t mem_space = -1; - hid_t file_space = -1; - hid_t dcpl = -1; - hid_t dxpl_write = -1; - hid_t dxpl_read = -1; - hsize_t dims[RANK]; - hsize_t chunk_dims[RANK]; - hsize_t start[RANK]; - hsize_t stride[RANK]; - hsize_t count[RANK]; - hsize_t block[RANK]; - char message[256]; - herr_t ret; - - /* Set up some flags to make some future if statements slightly more readable */ - direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - - /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then - * tests independent I/O - */ - multi_chunk_io = - (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET); - - is_chunked = - (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); - - is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; - - /* Set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - MPI_Barrier(MPI_COMM_WORLD); - - assert(mpi_size >= 1); - - mpi_comm = MPI_COMM_WORLD; - mpi_info = MPI_INFO_NULL; - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - assert(filename != NULL); - - /* Setup the file access template */ - fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((fapl >= 0), "create_faccess_plist() succeeded"); - - /* Create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Create the basic Space */ - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - /* If we are not testing contiguous datasets */ - if (is_chunked) { - /* Set up chunk information. 
*/ - chunk_dims[0] = dims[0] / (hsize_t)mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0), "chunk creation property list succeeded"); - } - - /* Create the dataset */ - dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - /* Create the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Choose a selection method based on the type of I/O we want to occur, - * and also set up some selection-dependeent test info. */ - switch (selection_mode) { - - /* Independent I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_IND: - case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - /* Since the dataset is chunked by row and each process selects a row, - * each process writes to a different chunk. This forces all I/O to be - * independent. - */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Multi Chunk - Independent"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - break; - - /* Collective I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_COL: - case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: - /* The dataset is chunked by rows, so each process takes a column which - * spans all chunks. Since the processes write non-overlapping regular - * selections to each chunk, the operation is purely collective. - */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - test_name = "Multi Chunk - Collective"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if (mpi_size > 1) - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - else - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - break; - - /* Mixed I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: - /* A chunk will be assigned collective I/O only if it is selected by each - * process. To get mixed I/O, have the root select all chunks and each - * subsequent process select the first and nth chunk. The first chunk, - * accessed by all, will be assigned collective I/O while each other chunk - * will be accessed only by the root and the nth process and will be - * assigned independent I/O. Each process will access one chunk collectively - * and at least one chunk independently, reporting mixed I/O. - */ - - if (mpi_rank == 0) { - /* Select the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - } - else { - /* Select the first and the nth chunk in the nth column */ - block[0] = (hsize_t)(dim0 / mpi_size); - block[1] = (hsize_t)(dim1 / mpi_size); - count[0] = 2; - count[1] = 1; - stride[0] = (hsize_t)mpi_rank * block[0]; - stride[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank * block[1]; - } - - test_name = "Multi Chunk - Mixed"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; - break; - - /* RESET tests that the properties are properly reset to defaults each time I/O is - * performed. To achieve this, we have RESET perform collective I/O (which would change - * the values from the defaults) followed by independent I/O (which should report the - * default values). RESET doesn't need to have a unique selection, so we reuse - * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works - * on all builds. The independent section of RESET can be found at the end of this function. 
- */ - case TEST_ACTUAL_IO_RESET: - - /* Mixed I/O with optimization and internal disagreement */ - case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: - /* A chunk will be assigned collective I/O only if it is selected by each - * process. To get mixed I/O with disagreement, assign process n to the - * first chunk and the nth chunk. The first chunk, selected by all, is - * assgigned collective I/O, while each other process gets independent I/O. - * Since the root process with only access the first chunk, it will report - * collective I/O. The subsequent processes will access the first chunk - * collectively, and their other chunk independently, reporting mixed I/O. - */ - - if (mpi_rank == 0) { - /* Select the first chunk in the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - block[0] = block[0] / (hsize_t)mpi_size; - } - else { - /* Select the first and the nth chunk in the nth column */ - block[0] = (hsize_t)(dim0 / mpi_size); - block[1] = (hsize_t)(dim1 / mpi_size); - count[0] = 2; - count[1] = 1; - stride[0] = (hsize_t)mpi_rank * block[0]; - stride[1] = 1; - start[0] = 0; - start[1] = (hsize_t)mpi_rank * block[1]; - } - - /* If the testname was not already set by the RESET case */ - if (selection_mode == TEST_ACTUAL_IO_RESET) - test_name = "RESET"; - else - test_name = "Multi Chunk - Mixed (Disagreement)"; - - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if (mpi_size > 1) { - if (mpi_rank == 0) - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - else - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; - } - else - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - - break; - - /* Linked Chunk I/O */ - case TEST_ACTUAL_IO_LINK_CHUNK: - /* Nothing special; link chunk I/O is forced in the dxpl settings. */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Link Chunk"; - actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - break; - - /* Contiguous Dataset */ - case TEST_ACTUAL_IO_CONTIGUOUS: - /* A non overlapping, regular selection in a contiguous dataset leads to - * collective I/O */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Contiguous"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; - break; - - case TEST_ACTUAL_IO_NO_COLLECTIVE: - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Independent"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - break; - - default: - test_name = "Undefined Selection Mode"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - break; - } - - ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Create a memory dataspace mirroring the dataset and select the same hyperslab - * as in the file space. 
- */ - mem_space = H5Screate_simple(RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - - ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Get the number of elements in the selection */ - length = dim0 * dim1; - - /* Allocate and initialize the buffer */ - buffer = (int *)malloc(sizeof(int) * (size_t)length); - VRFY((buffer != NULL), "malloc of buffer succeeded"); - for (i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - /* Set collective I/O properties in the dxpl. */ - if (is_collective) { - /* Request collective I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Set the threshold number of processes per chunk to twice mpi_size. - * This will prevent the threshold from ever being met, thus forcing - * multi chunk io instead of link chunk io. - * This is via default. - */ - if (multi_chunk_io) { - /* force multi-chunk-io by threshold */ - ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); - - /* set this to manipulate testing scenario about allocating processes - * to chunks */ - ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); - } - - /* Set directly go to multi-chunk-io without threshold calc. */ - if (direct_multi_chunk_io) { - /* set for multi chunk io by property*/ - ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - } - - /* Make a copy of the dxpl to test the read operation */ - dxpl_read = H5Pcopy(dxpl_write); - VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Retrieve Actual io values */ - ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - /* Read */ - ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Retrieve Actual io values */ - ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - /* Check write vs read */ - VRFY((actual_io_mode_read == actual_io_mode_write), - "reading and writing are the same for actual_io_mode"); - VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), - "reading and writing are the same for actual_chunk_opt_mode"); - - /* Test values */ - if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 && - actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) { - snprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n", - 
test_name); - VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); - snprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name); - VRFY((actual_io_mode_write == actual_io_mode_expected), message); - } - else { - fprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write, - actual_io_mode_write); - } - - /* To test that the property is successfully reset to the default, we perform some - * independent I/O after the collective I/O - */ - if (selection_mode == TEST_ACTUAL_IO_RESET) { - if (mpi_rank == 0) { - /* Switch to independent io */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Write */ - ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Check Properties */ - ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset write (independent)"); - VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset write (independent)"); - - /* Read */ - ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Check Properties */ - ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY((ret >= 0), "retrieving actual io mode succeeded"); - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); - - VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset read (independent)"); - VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset read (independent)"); - } - } - - /* Release some resources */ - ret = H5Sclose(sid); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Pclose(dxpl_write); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Pclose(dxpl_read); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(mem_space); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(file_space); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - free(buffer); - return; -} - -/* Function: actual_io_mode_tests - * - * Purpose: Tests all possible cases of the actual_io_mode property. 
- * - */ -void -actual_io_mode_tests(void) -{ - int mpi_size = -1; - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Only run these tests if selection I/O is not being used - selection I/O - * bypasses this IO mode decision - it's effectively always multi chunk - * currently */ - if (/* !H5_use_selection_io_g */ true) { - test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); - - /* - * Test multi-chunk-io via proc_num threshold - */ - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); - - /* The Multi Chunk Mixed test requires at least three processes. */ - if (mpi_size > 2) - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); - else - fprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); - - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); - - /* - * Test multi-chunk-io via setting direct property - */ - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - - test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); - test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); - - test_actual_io_mode(TEST_ACTUAL_IO_RESET); - } - - return; -} - -/* - * Function: test_no_collective_cause_mode - * - * Purpose: - * tests cases for broken collective I/O and checks that the - * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values. - * - * Input: - * selection_mode: various mode to cause broken collective I/O - * Note: Originally, each TEST case is supposed to be used alone. - * After some discussion, this is updated to take multiple TEST cases - * with '|'. However there is no error check for any of combined - * test cases, so a tester is responsible to understand and feed - * proper combination of TESTs if needed. - * - * - * TEST_COLLECTIVE: - * Test for regular collective I/O without cause of breaking. - * Just to test normal behavior. - * - * TEST_SET_INDEPENDENT: - * Test for Independent I/O as the cause of breaking collective I/O. - * - * TEST_DATATYPE_CONVERSION: - * Test for Data Type Conversion as the cause of breaking collective I/O. - * - * TEST_DATA_TRANSFORMS: - * Test for Data Transform feature as the cause of breaking collective I/O. - * - * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES: - * Test for NULL dataspace as the cause of breaking collective I/O. - * - * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT: - * Test for Compact layout as the cause of breaking collective I/O. - * - * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL: - * Test for Externl-File storage as the cause of breaking collective I/O. 
- * - */ -#ifdef LATER -#define DSET_NOCOLCAUSE "nocolcause" -#endif -#define FILE_EXTERNAL "nocolcause_extern.data" -static void -test_no_collective_cause_mode(int selection_mode) -{ - uint32_t no_collective_cause_local_write = 0; - uint32_t no_collective_cause_local_read = 0; - uint32_t no_collective_cause_local_expected = 0; - uint32_t no_collective_cause_global_write = 0; - uint32_t no_collective_cause_global_read = 0; - uint32_t no_collective_cause_global_expected = 0; - - const char *filename; - const char *test_name; - bool is_chunked = 1; - bool is_independent = 0; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int *buffer; - int i; - MPI_Comm mpi_comm; - MPI_Info mpi_info; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl = -1; - hid_t dcpl = -1; - hid_t dxpl_write = -1; - hid_t dxpl_read = -1; - hsize_t dims[RANK]; - hid_t mem_space = -1; - hid_t file_space = -1; - hsize_t chunk_dims[RANK]; - herr_t ret; - /* set to global value as default */ - int l_facc_type = facc_type; - char message[256]; - - /* Set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - MPI_Barrier(MPI_COMM_WORLD); - - assert(mpi_size >= 1); - - mpi_comm = MPI_COMM_WORLD; - mpi_info = MPI_INFO_NULL; - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - ret = H5Pset_layout(dcpl, H5D_COMPACT); - VRFY((ret >= 0), "set COMPACT layout succeeded"); - is_chunked = 0; - } - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED); - VRFY((ret >= 0), "set EXTERNAL file layout succeeded"); - is_chunked = 0; - } - - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { - sid = H5Screate(H5S_NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - is_chunked = 0; - } - else { - /* Create the basic Space */ - /* if this is a compact dataset, create a small dataspace that does not exceed 64K */ - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - dims[0] = ROW_FACTOR * 6; - dims[1] = COL_FACTOR * 6; - } - else { - dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - } - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - } - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - assert(filename != NULL); - - /* Setup the file access template */ - fapl = create_faccess_plist(mpi_comm, mpi_info, l_facc_type); - VRFY((fapl >= 0), "create_faccess_plist() succeeded"); - - /* Create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* If we are not testing contiguous datasets */ - if (is_chunked) { - /* Set up chunk information. 
*/ - chunk_dims[0] = dims[0] / (hsize_t)mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0), "chunk creation property list succeeded"); - } - - /* Create the dataset */ - dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - /* - * Set expected causes and some tweaks based on the type of test - */ - if (selection_mode & TEST_DATATYPE_CONVERSION) { - test_name = "Broken Collective I/O - Datatype Conversion"; - no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; - no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; - /* set different sign to trigger type conversion */ - data_type = H5T_NATIVE_UINT; - } - - if (selection_mode & TEST_DATA_TRANSFORMS) { - test_name = "Broken Collective I/O - DATA Transforms"; - no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; - no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; - } - - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { - test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; - no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - } - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || - selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; - no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - } - - if (selection_mode & TEST_COLLECTIVE) { - test_name = "Broken Collective I/O - Not Broken"; - no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; - no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE; - } - - if (selection_mode & TEST_SET_INDEPENDENT) { - test_name = "Broken Collective I/O - Independent"; - no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; - no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; - /* switch to independent io */ - is_independent = 1; - } - - /* use all spaces for certain tests */ - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES || - selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - file_space = H5S_ALL; - mem_space = H5S_ALL; - } - else { - /* Get the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Create the memory dataspace */ - mem_space = H5Screate_simple(RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - } - - /* Get the number of elements in the selection */ - length = (int)(dims[0] * dims[1]); - - /* Allocate and initialize the buffer */ - buffer = (int *)malloc(sizeof(int) * (size_t)length); - VRFY((buffer != NULL), "malloc of buffer succeeded"); - for (i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - if (is_independent) { - /* Set Independent I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - else { - /* Set Collective I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - - if (selection_mode & TEST_DATA_TRANSFORMS) { - ret = 
H5Pset_data_transform(dxpl_write, "x+1"); - VRFY((ret >= 0), "H5Pset_data_transform succeeded"); - } - - /*--------------------- - * Test Write access - *---------------------*/ - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write, - &no_collective_cause_global_write); - VRFY((ret >= 0), "retrieving no collective cause succeeded"); - - /*--------------------- - * Test Read access - *---------------------*/ - - /* Make a copy of the dxpl to test the read operation */ - dxpl_read = H5Pcopy(dxpl_write); - VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); - - /* Read */ - ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read, - &no_collective_cause_global_read); - VRFY((ret >= 0), "retrieving no collective cause succeeded"); - - /* Check write vs read */ - VRFY((no_collective_cause_local_read == no_collective_cause_local_write), - "reading and writing are the same for local cause of Broken Collective I/O"); - VRFY((no_collective_cause_global_read == no_collective_cause_global_write), - "reading and writing are the same for global cause of Broken Collective I/O"); - - /* Test values */ - memset(message, 0, sizeof(message)); - snprintf(message, sizeof(message), "Local cause of Broken Collective I/O has the correct value for %s.\n", - test_name); - VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message); - memset(message, 0, sizeof(message)); - snprintf(message, sizeof(message), - "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name); - VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message); - - /* Release some resources */ - if (sid) - H5Sclose(sid); - if (dcpl) - H5Pclose(dcpl); - if (dxpl_write) - H5Pclose(dxpl_write); - if (dxpl_read) - H5Pclose(dxpl_read); - if (dataset) - H5Dclose(dataset); - if (mem_space) - H5Sclose(mem_space); - if (file_space) - H5Sclose(file_space); - if (fid) - H5Fclose(fid); - free(buffer); - - /* clean up external file */ - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) - H5Fdelete(FILE_EXTERNAL, fapl); - - if (fapl) - H5Pclose(fapl); - - return; -} - -/* Function: no_collective_cause_tests - * - * Purpose: Tests cases for broken collective IO. 
- * - */ -void -no_collective_cause_tests(void) -{ - /* - * Test individual cause - */ - test_no_collective_cause_mode(TEST_COLLECTIVE); - test_no_collective_cause_mode(TEST_SET_INDEPENDENT); - test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode(TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); - - /* - * Test combined causes - */ - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | - TEST_DATA_TRANSFORMS); - - return; -} - -/* - * Test consistency semantics of atomic mode - */ - -/* - * Example of using the parallel HDF5 library to create a dataset, - * where process 0 writes and the other processes read at the same - * time. If atomic mode is set correctly, the other processes should - * read the old values in the dataset or the new ones. - */ - -void -dataset_atomicity(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t dataset1; /* Dataset IDs */ - hsize_t dims[RANK]; /* dataset dim sizes */ - int *write_buf = NULL; /* data buffer */ - int *read_buf = NULL; /* data buffer */ - int buf_size; - hid_t dataset2; - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* Memory dataspace ID */ - hsize_t start[RANK]; - hsize_t stride[RANK]; - hsize_t count[RANK]; - hsize_t block[RANK]; - const char *filename; - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - int i, j, k; - bool atomicity = false; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - dim0 = 64; - dim1 = 32; - filename = PARATESTFILE /* GetTestParameters() */; - if (facc_type != FACC_MPIO) { - printf("Atomicity tests will not work without the MPIO VFD\n"); - return; - } - if (VERBOSE_MED) - printf("atomic writes to file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, basic dataset, or more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - buf_size = dim0 * dim1; - /* allocate memory for data buffer */ - write_buf = (int *)calloc((size_t)buf_size, sizeof(int)); - VRFY((write_buf != NULL), "write_buf calloc succeeded"); - /* allocate memory for data buffer */ - read_buf = (int *)calloc((size_t)buf_size, sizeof(int)); - VRFY((read_buf != NULL), "read_buf calloc succeeded"); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* setup dimensionality object */ - 
dims[0] = (hsize_t)dim0; - dims[1] = (hsize_t)dim1; - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create datasets */ - dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* initialize datasets to 0s */ - if (0 == mpi_rank) { - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - } - - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(sid); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - - MPI_Barrier(comm); - - /* make sure setting atomicity fails on a serial file ID */ - /* file locking allows only one file open (serial) for writing */ - if (MAINPROCESS) { - fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); - VRFY((fid >= 0), "H5Fopen succeeded"); - - /* should fail */ - H5E_BEGIN_TRY - { - ret = H5Fset_mpi_atomicity(fid, true); - } - H5E_END_TRY - VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed"); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - MPI_Barrier(comm); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl); - VRFY((fid >= 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - ret = H5Fset_mpi_atomicity(fid, true); - VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded"); - - /* open dataset1 (contiguous case) */ - dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); - - if (0 == mpi_rank) { - for (i = 0; i < buf_size; i++) { - write_buf[i] = 5; - } - } - else { - for (i = 0; i < buf_size; i++) { - read_buf[i] = 8; - } - } - - /* check that the atomicity flag is set */ - ret = H5Fget_mpi_atomicity(fid, &atomicity); - VRFY((ret >= 0), "atomcity get failed"); - VRFY((atomicity == true), "atomcity set failed"); - - MPI_Barrier(comm); - - /* Process 0 writes contiguously to the entire dataset */ - if (0 == mpi_rank) { - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - } - /* The other processes read the entire dataset */ - else { - ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - } - - if (VERBOSE_MED) { - i = 0; - j = 0; - k = 0; - for (i = 0; i < dim0; i++) { - printf("\n"); - for (j = 0; j < dim1; j++) - printf("%d ", read_buf[k++]); - } - } - - /* The processes that read the dataset must either read all values - as 0 (read happened before process 0 wrote to dataset 1), or 5 - (read happened after process 0 wrote to dataset 1) */ - if (0 != mpi_rank) { - int compare = read_buf[0]; - - VRFY((compare == 0 || compare == 5), - "Atomicity Test Failed Process %d: Value read should be 0 or 5\n"); - for (i 
= 1; i < buf_size; i++) { - if (read_buf[i] != compare) { - printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, - read_buf[i], compare); - nerrors++; - } - } - } - - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5D close succeeded"); - - /* release data buffers */ - if (write_buf) - free(write_buf); - if (read_buf) - free(read_buf); - - /* open dataset2 (non-contiguous case) */ - dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dopen2 succeeded"); - - /* allocate memory for data buffer */ - write_buf = (int *)calloc((size_t)buf_size, sizeof(int)); - VRFY((write_buf != NULL), "write_buf calloc succeeded"); - /* allocate memory for data buffer */ - read_buf = (int *)calloc((size_t)buf_size, sizeof(int)); - VRFY((read_buf != NULL), "read_buf calloc succeeded"); - - for (i = 0; i < buf_size; i++) { - write_buf[i] = 5; - } - for (i = 0; i < buf_size; i++) { - read_buf[i] = 8; - } - - atomicity = false; - /* check that the atomicity flag is set */ - ret = H5Fget_mpi_atomicity(fid, &atomicity); - VRFY((ret >= 0), "atomcity get failed"); - VRFY((atomicity == true), "atomcity set failed"); - - block[0] = (hsize_t)(dim0 / mpi_size - 1); - block[1] = (hsize_t)(dim1 / mpi_size - 1); - stride[0] = block[0] + 1; - stride[1] = block[1] + 1; - count[0] = (hsize_t)mpi_size; - count[1] = (hsize_t)mpi_size; - start[0] = 0; - start[1] = 0; - - /* create a file dataspace */ - file_dataspace = H5Dget_space(dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace */ - mem_dataspace = H5Screate_simple(RANK, dims, NULL); - VRFY((mem_dataspace >= 0), ""); - - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - MPI_Barrier(comm); - - /* Process 0 writes to the dataset */ - if (0 == mpi_rank) { - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - } - /* All processes wait for the write to finish. 
This works because - atomicity is set to true */ - MPI_Barrier(comm); - /* The other processes read the entire dataset */ - if (0 != mpi_rank) { - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf); - VRFY((ret >= 0), "H5Dread dataset2 succeeded"); - } - - if (VERBOSE_MED) { - if (mpi_rank == 1) { - i = 0; - j = 0; - k = 0; - for (i = 0; i < dim0; i++) { - printf("\n"); - for (j = 0; j < dim1; j++) - printf("%d ", read_buf[k++]); - } - printf("\n"); - } - } - - /* The processes that read the dataset must either read all values - as 5 (read happened after process 0 wrote to dataset 1) */ - if (0 != mpi_rank) { - int compare; - i = 0; - j = 0; - k = 0; - - compare = 5; - - for (i = 0; i < dim0; i++) { - if (i >= mpi_rank * ((int)block[0] + 1)) { - break; - } - if ((i + 1) % ((int)block[0] + 1) == 0) { - k += dim1; - continue; - } - for (j = 0; j < dim1; j++) { - if (j >= mpi_rank * ((int)block[1] + 1)) { - k += dim1 - mpi_rank * ((int)block[1] + 1); - break; - } - if ((j + 1) % ((int)block[1] + 1) == 0) { - k++; - continue; - } - else if (compare != read_buf[k]) { - printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, - k, read_buf[k], compare); - nerrors++; - } - k++; - } - } - } - - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - - /* release data buffers */ - if (write_buf) - free(write_buf); - if (read_buf) - free(read_buf); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); -} - -/* Function: dense_attr_test - * - * Purpose: Test cases for writing dense attributes in parallel - * - */ -void -test_dense_attr(void) -{ - int mpi_size, mpi_rank; - hid_t fpid, fid; - hid_t gid, gpid; - hid_t atFileSpace, atid; - hsize_t atDims[1] = {10000}; - herr_t status; - const char *filename; - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, group, dataset, or attribute aren't supported with " - "this connector\n"); - fflush(stdout); - } - - return; - } - - /* get filename */ - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - assert(filename != NULL); - - fpid = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fpid > 0), "H5Pcreate succeeded"); - status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - VRFY((status >= 0), "H5Pset_libver_bounds succeeded"); - status = H5Pset_fapl_mpio(fpid, MPI_COMM_WORLD, MPI_INFO_NULL); - VRFY((status >= 0), "H5Pset_fapl_mpio succeeded"); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid); - VRFY((fid > 0), "H5Fcreate succeeded"); - status = H5Pclose(fpid); - VRFY((status >= 0), "H5Pclose succeeded"); - - gpid = H5Pcreate(H5P_GROUP_CREATE); - VRFY((gpid > 0), "H5Pcreate succeeded"); - status = H5Pset_attr_phase_change(gpid, 0, 0); - VRFY((status >= 0), "H5Pset_attr_phase_change succeeded"); - gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT); - VRFY((gid > 0), "H5Gcreate2 succeeded"); - status = H5Pclose(gpid); - VRFY((status >= 0), 
"H5Pclose succeeded"); - - atFileSpace = H5Screate_simple(1, atDims, NULL); - VRFY((atFileSpace > 0), "H5Screate_simple succeeded"); - atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT); - VRFY((atid > 0), "H5Acreate succeeded"); - status = H5Sclose(atFileSpace); - VRFY((status >= 0), "H5Sclose succeeded"); - - status = H5Aclose(atid); - VRFY((status >= 0), "H5Aclose succeeded"); - - status = H5Gclose(gid); - VRFY((status >= 0), "H5Gclose succeeded"); - status = H5Fclose(fid); - VRFY((status >= 0), "H5Fclose succeeded"); - - return; -} diff --git a/testpar/API/t_file.c b/testpar/API/t_file.c deleted file mode 100644 index 61d009c2516..00000000000 --- a/testpar/API/t_file.c +++ /dev/null @@ -1,1044 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Parallel tests for file operations - */ - -#include "hdf5.h" -#include "testphdf5.h" - -#if 0 -#include "H5CXprivate.h" /* API Contexts */ -#include "H5Iprivate.h" -#include "H5PBprivate.h" - -/* - * This file needs to access private information from the H5F package. - */ -#define H5AC_FRIEND /*suppress error about including H5ACpkg */ -#include "H5ACpkg.h" -#define H5C_FRIEND /*suppress error about including H5Cpkg */ -#include "H5Cpkg.h" -#define H5F_FRIEND /*suppress error about including H5Fpkg */ -#define H5F_TESTING -#include "H5Fpkg.h" -#define H5MF_FRIEND /*suppress error about including H5MFpkg */ -#include "H5MFpkg.h" -#endif - -#define NUM_DSETS 5 - -int mpi_size, mpi_rank; - -#if 0 -static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy); -static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, - size_t page_buffer_size); -#endif - -/* - * test file access by communicator besides COMM_WORLD. - * Split COMM_WORLD into two, one (even_comm) contains the original - * processes of even ranks. The other (odd_comm) contains the original - * processes of odd ranks. Processes in even_comm creates a file, then - * cloose it, using even_comm. Processes in old_comm just do a barrier - * using odd_comm. Then they all do a barrier using COMM_WORLD. - * If the file creation and cloose does not do correct collective action - * according to the communicator argument, the processes will freeze up - * sooner or later due to barrier mixed up. 
- */ -void -test_split_comm_access(void) -{ - MPI_Comm comm; - MPI_Info info = MPI_INFO_NULL; - int is_old, mrc; - int newrank, newprocs; - hid_t fid; /* file IDs */ - hid_t acc_tpl; /* File access properties */ - herr_t ret; /* generic return value */ - const char *filename; - - filename = (const char *)PARATESTFILE /* GetTestParameters()*/; - if (VERBOSE_MED) - printf("Split Communicator access test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - is_old = mpi_rank % 2; - mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm); - VRFY((mrc == MPI_SUCCESS), ""); - MPI_Comm_size(comm, &newprocs); - MPI_Comm_rank(comm, &newrank); - - if (is_old) { - /* odd-rank processes */ - mrc = MPI_Barrier(comm); - VRFY((mrc == MPI_SUCCESS), ""); - } - else { - /* even-rank processes */ - int sub_mpi_rank; /* rank in the sub-comm */ - MPI_Comm_rank(comm, &sub_mpi_rank); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* close the file */ - ret = H5Fclose(fid); - VRFY((ret >= 0), ""); - - /* delete the test file */ - ret = H5Fdelete(filename, acc_tpl); - VRFY((ret >= 0), "H5Fdelete succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - } - mrc = MPI_Comm_free(&comm); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded"); - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded"); -} - -#if 0 -void -test_page_buffer_access(void) -{ - hid_t file_id = -1; /* File ID */ - hid_t fcpl, fapl; - size_t page_count = 0; - int i, num_elements = 200; - haddr_t raw_addr, meta_addr; - int *data; - H5F_t *f = NULL; - herr_t ret; /* generic return value */ - const char *filename; - bool api_ctx_pushed = false; /* Whether API context pushed */ - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - filename = (const char *)GetTestParameters(); - - if (VERBOSE_MED) - printf("Page Buffer Usage in Parallel %s\n", filename); - - fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl >= 0), "create_faccess_plist succeeded"); - fcpl = H5Pcreate(H5P_FILE_CREATE); - VRFY((fcpl >= 0), ""); - - ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0); - VRFY((ret == 0), ""); - ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128); - VRFY((ret == 0), ""); - ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0); - VRFY((ret == 0), ""); - - /* This should fail because collective metadata writes are not supported with page buffering */ - H5E_BEGIN_TRY - { - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); - } - H5E_END_TRY - VRFY((file_id < 0), "H5Fcreate failed"); - - /* disable collective metadata writes for page buffering to work */ - ret = H5Pset_coll_metadata_write(fapl, false); - VRFY((ret >= 0), ""); - - ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); - VRFY((ret == 0), ""); - ret = 
open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100, - sizeof(int) * 100000); - VRFY((ret == 0), ""); - - ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); - VRFY((ret == 0), ""); - ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100, - sizeof(int) * 100000); - VRFY((ret == 0), ""); - - ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100); - VRFY((ret == 0), ""); - - data = (int *)malloc(sizeof(int) * (size_t)num_elements); - - /* initialize all the elements to have a value of -1 */ - for (i = 0; i < num_elements; i++) - data[i] = -1; - if (MAINPROCESS) { - hid_t fapl_self = H5I_INVALID_HID; - fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type); - - ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0); - VRFY((ret == 0), ""); - /* collective metadata writes do not work with page buffering */ - ret = H5Pset_coll_metadata_write(fapl_self, false); - VRFY((ret >= 0), ""); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self); - VRFY((file_id >= 0), ""); - - /* Push API context */ - ret = H5CX_push(); - VRFY((ret == 0), "H5CX_push()"); - api_ctx_pushed = true; - - /* Get a pointer to the internal file object */ - f = (H5F_t *)H5I_object(file_id); - - VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); - - /* allocate space for 200 raw elements */ - raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements); - VRFY((raw_addr != HADDR_UNDEF), ""); - - /* allocate space for 200 metadata elements */ - meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements); - VRFY((meta_addr != HADDR_UNDEF), ""); - - page_count = 0; - - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data); - VRFY((ret == 0), ""); - - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* update the first 50 elements */ - for (i = 0; i < 50; i++) - data[i] = i; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); - H5Eprint2(H5E_DEFAULT, stderr); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - page_count += 2; - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* update the second 50 elements */ - for (i = 0; i < 50; i++) - data[i] = i + 50; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* update 100 - 200 */ - for (i = 0; i < 100; i++) - data[i] = i + 100; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - ret = H5PB_flush(f->shared); - VRFY((ret == 0), 
""); - - /* read elements 0 - 200 */ - ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 200; i++) - VRFY((data[i] == i), "Read different values than written"); - ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 200; i++) - VRFY((data[i] == i), "Read different values than written"); - - /* read elements 0 - 50 */ - ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 50; i++) - VRFY((data[i] == i), "Read different values than written"); - ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 50; i++) - VRFY((data[i] == i), "Read different values than written"); - - /* close the file */ - ret = H5Fclose(file_id); - VRFY((ret >= 0), "H5Fclose succeeded"); - ret = H5Pclose(fapl_self); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* Pop API context */ - if (api_ctx_pushed) { - ret = H5CX_pop(false); - VRFY((ret == 0), "H5CX_pop()"); - api_ctx_pushed = false; - } - } - - MPI_Barrier(MPI_COMM_WORLD); - - if (mpi_size > 1) { - ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0); - VRFY((ret == 0), ""); - /* collective metadata writes do not work with page buffering */ - ret = H5Pset_coll_metadata_write(fapl, false); - VRFY((ret >= 0), ""); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); - VRFY((file_id >= 0), ""); - - /* Push API context */ - ret = H5CX_push(); - VRFY((ret == 0), "H5CX_push()"); - api_ctx_pushed = true; - - /* Get a pointer to the internal file object */ - f = (H5F_t *)H5I_object(file_id); - - VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); - - /* allocate space for 200 raw elements */ - raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements); - VRFY((raw_addr != HADDR_UNDEF), ""); - /* allocate space for 200 metadata elements */ - meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements); - VRFY((meta_addr != HADDR_UNDEF), ""); - - page_count = 0; - - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data); - VRFY((ret == 0), ""); - - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* update the first 50 elements */ - for (i = 0; i < 50; i++) - data[i] = i; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* update the second 50 elements */ - for (i = 0; i < 50; i++) - data[i] = i + 50; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), 
sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* update 100 - 200 */ - for (i = 0; i < 100; i++) - data[i] = i + 100; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data); - VRFY((ret == 0), ""); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((ret == 0), ""); - - /* read elements 0 - 200 */ - ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 200; i++) - VRFY((data[i] == i), "Read different values than written"); - ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 200; i++) - VRFY((data[i] == i), "Read different values than written"); - - /* read elements 0 - 50 */ - ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 50; i++) - VRFY((data[i] == i), "Read different values than written"); - ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - page_count += 1; - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 50; i++) - VRFY((data[i] == i), "Read different values than written"); - - MPI_Barrier(MPI_COMM_WORLD); - /* reset the first 50 elements to -1*/ - for (i = 0; i < 50; i++) - data[i] = -1; - ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - - /* read elements 0 - 50 */ - ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 50; i++) - VRFY((data[i] == -1), "Read different values than written"); - ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); - VRFY((ret == 0), ""); - VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i = 0; i < 50; i++) - VRFY((data[i] == -1), "Read different values than written"); - - /* close the file */ - ret = H5Fclose(file_id); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Pclose(fcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* Pop API context */ - if (api_ctx_pushed) { - ret = H5CX_pop(false); - VRFY((ret == 0), "H5CX_pop()"); - api_ctx_pushed = false; - } - - free(data); - data = NULL; - MPI_Barrier(MPI_COMM_WORLD); -} - -static int -create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy) 
-{ - hid_t file_id, dset_id, grp_id; - hid_t sid, mem_dataspace; - hsize_t start[RANK]; - hsize_t count[RANK]; - hsize_t stride[RANK]; - hsize_t block[RANK]; - DATATYPE *data_array = NULL; - hsize_t dims[RANK], i; - hsize_t num_elements; - int k; - char dset_name[20]; - H5F_t *f = NULL; - H5C_t *cache_ptr = NULL; - H5AC_cache_config_t config; - bool api_ctx_pushed = false; /* Whether API context pushed */ - herr_t ret; - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); - VRFY((file_id >= 0), ""); - - ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((ret == 0), ""); - - /* Push API context */ - ret = H5CX_push(); - VRFY((ret == 0), "H5CX_push()"); - api_ctx_pushed = true; - - f = (H5F_t *)H5I_object(file_id); - VRFY((f != NULL), ""); - - cache_ptr = f->shared->cache; - VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); - - cache_ptr->ignore_tags = true; - H5C_stats__reset(cache_ptr); - config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - - ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config); - VRFY((ret == 0), ""); - - config.metadata_write_strategy = metadata_write_strategy; - - ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config); - VRFY((ret == 0), ""); - - grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((grp_id >= 0), ""); - - dims[0] = (hsize_t)(ROW_FACTOR * mpi_size); - dims[1] = (hsize_t)(COL_FACTOR * mpi_size); - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* Each process takes a slabs of rows. */ - block[0] = dims[0] / (hsize_t)mpi_size; - block[1] = dims[1]; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)mpi_rank * block[0]; - start[1] = 0; - - num_elements = block[0] * block[1]; - /* allocate memory for data buffer */ - data_array = (DATATYPE *)malloc(num_elements * sizeof(DATATYPE)); - VRFY((data_array != NULL), "data_array malloc succeeded"); - /* put some trivial data in the data_array */ - for (i = 0; i < num_elements; i++) - data_array[i] = mpi_rank + 1; - - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(1, &num_elements, NULL); - VRFY((mem_dataspace >= 0), ""); - - for (k = 0; k < NUM_DSETS; k++) { - snprintf(dset_name, sizeof(dset_name), "D1dset%d", k); - dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), ""); - ret = H5Dclose(dset_id); - VRFY((ret == 0), ""); - - snprintf(dset_name, sizeof(dset_name), "D2dset%d", k); - dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), ""); - ret = H5Dclose(dset_id); - VRFY((ret == 0), ""); - - snprintf(dset_name, sizeof(dset_name), "D3dset%d", k); - dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), ""); - ret = H5Dclose(dset_id); - VRFY((ret == 0), ""); - - snprintf(dset_name, sizeof(dset_name), "dset%d", k); - dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), ""); - - ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); - VRFY((ret == 0), ""); - - ret = H5Dclose(dset_id); - VRFY((ret == 0), ""); - - memset(data_array, 0, num_elements * sizeof(DATATYPE)); - dset_id = H5Dopen2(grp_id, 
dset_name, H5P_DEFAULT); - VRFY((dset_id >= 0), ""); - - ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); - VRFY((ret == 0), ""); - - ret = H5Dclose(dset_id); - VRFY((ret == 0), ""); - - for (i = 0; i < num_elements; i++) - VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed"); - - snprintf(dset_name, sizeof(dset_name), "D1dset%d", k); - ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); - VRFY((ret == 0), ""); - snprintf(dset_name, sizeof(dset_name), "D2dset%d", k); - ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); - VRFY((ret == 0), ""); - snprintf(dset_name, sizeof(dset_name), "D3dset%d", k); - ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); - VRFY((ret == 0), ""); - } - - ret = H5Gclose(grp_id); - VRFY((ret == 0), ""); - ret = H5Fclose(file_id); - VRFY((ret == 0), ""); - ret = H5Sclose(sid); - VRFY((ret == 0), ""); - ret = H5Sclose(mem_dataspace); - VRFY((ret == 0), ""); - - /* Pop API context */ - if (api_ctx_pushed) { - ret = H5CX_pop(false); - VRFY((ret == 0), "H5CX_pop()"); - api_ctx_pushed = false; - } - - MPI_Barrier(MPI_COMM_WORLD); - free(data_array); - return 0; -} /* create_file */ - -static int -open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, - size_t page_buffer_size) -{ - hid_t file_id, dset_id, grp_id, grp_id2; - hid_t sid, mem_dataspace; - DATATYPE *data_array = NULL; - hsize_t dims[RANK]; - hsize_t start[RANK]; - hsize_t count[RANK]; - hsize_t stride[RANK]; - hsize_t block[RANK]; - int i, k, ndims; - hsize_t num_elements; - char dset_name[20]; - H5F_t *f = NULL; - H5C_t *cache_ptr = NULL; - H5AC_cache_config_t config; - bool api_ctx_pushed = false; /* Whether API context pushed */ - herr_t ret; - - config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - ret = H5Pget_mdc_config(fapl, &config); - VRFY((ret == 0), ""); - - config.metadata_write_strategy = metadata_write_strategy; - - ret = H5Pget_mdc_config(fapl, &config); - VRFY((ret == 0), ""); - - file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl); - H5Eprint2(H5E_DEFAULT, stderr); - VRFY((file_id >= 0), ""); - - /* Push API context */ - ret = H5CX_push(); - VRFY((ret == 0), "H5CX_push()"); - api_ctx_pushed = true; - - ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((ret == 0), ""); - - f = (H5F_t *)H5I_object(file_id); - VRFY((f != NULL), ""); - - cache_ptr = f->shared->cache; - VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); - - MPI_Barrier(MPI_COMM_WORLD); - - VRFY((f->shared->page_buf != NULL), ""); - VRFY((f->shared->page_buf->page_size == page_size), ""); - VRFY((f->shared->page_buf->max_size == page_buffer_size), ""); - - grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT); - VRFY((grp_id >= 0), ""); - - dims[0] = (hsize_t)(ROW_FACTOR * mpi_size); - dims[1] = (hsize_t)(COL_FACTOR * mpi_size); - - /* Each process takes a slabs of rows. 
*/ - block[0] = dims[0] / (hsize_t)mpi_size; - block[1] = dims[1]; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)mpi_rank * block[0]; - start[1] = 0; - - num_elements = block[0] * block[1]; - /* allocate memory for data buffer */ - data_array = (DATATYPE *)malloc(num_elements * sizeof(DATATYPE)); - VRFY((data_array != NULL), "data_array malloc succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(1, &num_elements, NULL); - VRFY((mem_dataspace >= 0), ""); - - for (k = 0; k < NUM_DSETS; k++) { - snprintf(dset_name, sizeof(dset_name), "dset%d", k); - dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT); - VRFY((dset_id >= 0), ""); - - sid = H5Dget_space(dset_id); - VRFY((dset_id >= 0), "H5Dget_space succeeded"); - - ndims = H5Sget_simple_extent_dims(sid, dims, NULL); - VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded"); - VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions"); - VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions"); - - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); - VRFY((ret >= 0), ""); - - ret = H5Dclose(dset_id); - VRFY((ret >= 0), ""); - ret = H5Sclose(sid); - VRFY((ret == 0), ""); - - for (i = 0; i < (int)num_elements; i++) - VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed"); - } - - grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((grp_id2 >= 0), ""); - ret = H5Gclose(grp_id2); - VRFY((ret == 0), ""); - - ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((ret == 0), ""); - - MPI_Barrier(MPI_COMM_WORLD); - /* flush invalidate each ring, starting from the outermost ring and - * working inward. - */ - for (i = 0; i < H5C__HASH_TABLE_LEN; i++) { - H5C_cache_entry_t *entry_ptr = NULL; - - entry_ptr = cache_ptr->index[i]; - - while (entry_ptr != NULL) { - assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - assert(entry_ptr->is_dirty == false); - - if (!entry_ptr->is_pinned && !entry_ptr->is_protected) { - ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0); - VRFY((ret == 0), ""); - } - - entry_ptr = entry_ptr->ht_next; - } - } - MPI_Barrier(MPI_COMM_WORLD); - - grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT); - H5Eprint2(H5E_DEFAULT, stderr); - VRFY((grp_id2 >= 0), ""); - ret = H5Gclose(grp_id2); - H5Eprint2(H5E_DEFAULT, stderr); - VRFY((ret == 0), ""); - - ret = H5Gclose(grp_id); - VRFY((ret == 0), ""); - ret = H5Fclose(file_id); - VRFY((ret == 0), ""); - ret = H5Sclose(mem_dataspace); - VRFY((ret == 0), ""); - - /* Pop API context */ - if (api_ctx_pushed) { - ret = H5CX_pop(false); - VRFY((ret == 0), "H5CX_pop()"); - api_ctx_pushed = false; - } - - free(data_array); - - return nerrors; -} -#endif - -/* - * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the - * incoming fapl that could conflict with the existing values in H5F_shared_t on - * multiple opens of the same file. 
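The test_file_properties test that follows verifies that MPI parameters stored on a file access property list come back as duplicates rather than as the original handles. A minimal sketch of that round trip, assuming a parallel HDF5 build and an initialized MPI environment; the variable names are illustrative:

    hid_t    fapl = H5Pcreate(H5P_FILE_ACCESS);
    MPI_Comm comm_out;
    MPI_Info info_out;
    int      cmp;

    H5Pset_mpi_params(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pget_mpi_params(fapl, &comm_out, &info_out);

    /* The plist duplicates the communicator, so the handles differ while the
     * groups match: expect MPI_CONGRUENT from MPI_Comm_compare, not MPI_IDENT. */
    MPI_Comm_compare(MPI_COMM_WORLD, comm_out, &cmp);

    MPI_Comm_free(&comm_out); /* the caller owns the returned duplicates */
    if (info_out != MPI_INFO_NULL)
        MPI_Info_free(&info_out);
    H5Pclose(fapl);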
- */ -void -test_file_properties(void) -{ - hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ - hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ - hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */ - bool is_coll; - htri_t are_equal; - const char *filename; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - MPI_Comm comm_out = MPI_COMM_NULL; - MPI_Info info_out = MPI_INFO_NULL; - herr_t ret; /* Generic return value */ - int mpi_ret; /* MPI return value */ - int cmp; /* Compare value */ - - /* set up MPI parameters */ - mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded"); - mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded"); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - - mpi_ret = MPI_Info_create(&info); - VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); - mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz"); - VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); - - /* setup file access plist */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); - ret = H5Pset_fapl_mpio(fapl_id, comm, info); - VRFY((ret >= 0), "H5Pset_fapl_mpio"); - - /* Check getting and setting MPI properties - * (for use in VOL connectors, not the MPI-I/O VFD) - */ - ret = H5Pset_mpi_params(fapl_id, comm, info); - VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); - ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out); - VRFY((ret >= 0), "H5Pget_mpi_params succeeded"); - - /* Check the communicator */ - VRFY((comm != comm_out), "Communicators should not be bitwise identical"); - cmp = MPI_UNEQUAL; - mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp); - VRFY((ret >= 0), "MPI_Comm_compare succeeded"); - VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare"); - - /* Check the info object */ - VRFY((info != info_out), "Info objects should not be bitwise identical"); - - /* Free the obtained comm and info object */ - mpi_ret = MPI_Comm_free(&comm_out); - VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded"); - mpi_ret = MPI_Info_free(&info_out); - VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); - - /* Copy the fapl and ensure it's equal to the original */ - fapl_copy_id = H5Pcopy(fapl_id); - VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy"); - are_equal = H5Pequal(fapl_id, fapl_copy_id); - VRFY((true == are_equal), "H5Pequal"); - - /* Add a property to the copy and ensure it's different now */ - mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc"); - VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); - ret = H5Pset_mpi_params(fapl_copy_id, comm, info); - VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); - are_equal = H5Pequal(fapl_id, fapl_copy_id); - VRFY((false == are_equal), "H5Pequal"); - - /* Add a property with the same key but a different value to the original - * and ensure they are still different. 
- */ - mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk"); - VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); - ret = H5Pset_mpi_params(fapl_id, comm, info); - VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); - are_equal = H5Pequal(fapl_id, fapl_copy_id); - VRFY((false == are_equal), "H5Pequal"); - - /* Set the second property in the original to the same - * value as the copy and ensure they are the same now. - */ - mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc"); - VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); - ret = H5Pset_mpi_params(fapl_id, comm, info); - VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); - are_equal = H5Pequal(fapl_id, fapl_copy_id); - VRFY((true == are_equal), "H5Pequal"); - - /* create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); - - /* verify settings for file access properties */ - - /* Collective metadata writes */ - ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); - VRFY((is_coll == false), "Incorrect property setting for coll metadata writes"); - - /* Collective metadata read API calling requirement */ - ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); - VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement"); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - - /* Open the file with the MPI-IO driver */ - ret = H5Pset_fapl_mpio(fapl_id, comm, info); - VRFY((ret >= 0), "H5Pset_fapl_mpio failed"); - fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); - VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); - - /* verify settings for file access properties */ - - /* Collective metadata writes */ - ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); - VRFY((is_coll == false), "Incorrect property setting for coll metadata writes"); - - /* Collective metadata read API calling requirement */ - ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); - VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement"); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - - /* Open the file with the MPI-IO driver w/ collective settings */ - ret = H5Pset_fapl_mpio(fapl_id, comm, info); - VRFY((ret >= 0), "H5Pset_fapl_mpio failed"); - /* Collective metadata writes */ - ret = H5Pset_coll_metadata_write(fapl_id, true); - VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); - /* Collective metadata read API calling requirement */ - ret = H5Pset_all_coll_metadata_ops(fapl_id, true); - VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); - fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); - VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); - - /* verify settings for file access properties */ - - /* Collective metadata writes */ - ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); - VRFY((is_coll == true), "Incorrect property setting for coll metadata writes"); - - /* Collective metadata read API calling requirement */ - ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); - VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement"); - - /* close fapl and retrieve 
it from file */ - ret = H5Pclose(fapl_id); - VRFY((ret >= 0), "H5Pclose succeeded"); - fapl_id = H5I_INVALID_HID; - - fapl_id = H5Fget_access_plist(fid); - VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS"); - - /* verify settings for file access properties */ - - /* Collective metadata writes */ - ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); - VRFY((is_coll == true), "Incorrect property setting for coll metadata writes"); - - /* Collective metadata read API calling requirement */ - ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); - VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); - VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement"); - - /* close file */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - - /* Release file-access plist */ - ret = H5Pclose(fapl_id); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Pclose(fapl_copy_id); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* Free the MPI info object */ - mpi_ret = MPI_Info_free(&info); - VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); - -} /* end test_file_properties() */ - -void -test_delete(void) -{ - hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ - hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ - const char *filename = NULL; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */ - herr_t ret; /* Generic return value */ - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or file more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* setup file access plist */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); - ret = H5Pset_fapl_mpio(fapl_id, comm, info); - VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); - - /* create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((fid != H5I_INVALID_HID), "H5Fcreate"); - - /* close the file */ - ret = H5Fclose(fid); - VRFY((SUCCEED == ret), "H5Fclose"); - - /* Verify that the file is an HDF5 file */ - is_hdf5 = H5Fis_accessible(filename, fapl_id); - VRFY((true == is_hdf5), "H5Fis_accessible"); - - /* Delete the file */ - ret = H5Fdelete(filename, fapl_id); - VRFY((SUCCEED == ret), "H5Fdelete"); - - /* Verify that the file is NO LONGER an HDF5 file */ - /* This should fail since there is no file */ - H5E_BEGIN_TRY - { - is_hdf5 = H5Fis_accessible(filename, fapl_id); - } - H5E_END_TRY - VRFY((is_hdf5 != SUCCEED), "H5Fis_accessible"); - - /* Release file-access plist */ - ret = H5Pclose(fapl_id); - VRFY((SUCCEED == ret), "H5Pclose"); - -} /* end test_delete() */ diff --git a/testpar/API/t_file_image.c b/testpar/API/t_file_image.c deleted file mode 100644 index 3b582adbcc1..00000000000 --- a/testpar/API/t_file_image.c +++ /dev/null @@ -1,385 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. 
The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Parallel tests for file image operations - */ - -#include "hdf5.h" -#include "testphdf5.h" - -/* file_image_daisy_chain_test - * - * Process zero: - * - * 1) Creates a core file with an integer vector data set of - * length n (= mpi_size), - * - * 2) Initializes the vector to zero in * location 0, and to -1 - * everywhere else. - * - * 3) Flushes the core file, and gets an image of it. Closes - * the core file. - * - * 4) Sends the image to process 1. - * - * 5) Awaits receipt on a file image from process n-1. - * - * 6) opens the image received from process n-1, verifies that - * it contains a vector of length equal to mpi_size, and - * that the vector contains (0, 1, 2, ... n-1) - * - * 7) closes the core file and exits. - * - * Process i (0 < i < n) - * - * 1) Await receipt of file image from process (i - 1). - * - * 2) Open the image with the core file driver, verify that i - * contains a vector v of length, and that v[j] = j for - * 0 <= j < i, and that v[j] == -1 for i <= j < n - * - * 3) Set v[i] = i in the core file. - * - * 4) Flush the core file and send it to process (i + 1) % n. - * - * 5) close the core file and exit. - * - * Test fails on a hang (if an image is not received), or on invalid data. - * - * JRM -- 11/28/11 - */ -void -file_image_daisy_chain_test(void) -{ - char file_name[1024] = "\0"; - int mpi_size, mpi_rank; - int mpi_result; - int i; - int space_ndims; - MPI_Status rcvstat; - int *vector_ptr = NULL; - hid_t fapl_id = -1; - hid_t file_id; /* file IDs */ - hid_t dset_id = -1; - hid_t dset_type_id = -1; - hid_t space_id = -1; - herr_t err; - hsize_t dims[1]; - void *image_ptr = NULL; - ssize_t bytes_read; - ssize_t image_len; - bool vector_ok = true; - htri_t tri_result; - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* setup file name */ - snprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank); - - if (mpi_rank == 0) { - - /* 1) Creates a core file with an integer vector data set - * of length mpi_size, - */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id >= 0), "creating fapl"); - - err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false); - VRFY((err >= 0), "setting core file driver in fapl."); - - file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "created core file"); - - dims[0] = (hsize_t)mpi_size; - space_id = H5Screate_simple(1, dims, dims); - VRFY((space_id >= 0), "created data space"); - - dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, 
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), "created data set"); - - /* 2) Initialize the vector to zero in location 0, and - * to -1 everywhere else. - */ - - vector_ptr = (int *)malloc((size_t)(mpi_size) * sizeof(int)); - VRFY((vector_ptr != NULL), "allocated in memory representation of vector"); - - vector_ptr[0] = 0; - for (i = 1; i < mpi_size; i++) - vector_ptr[i] = -1; - - err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); - VRFY((err >= 0), "wrote initial data to vector."); - - free(vector_ptr); - vector_ptr = NULL; - - /* 3) Flush the core file, and get an image of it. Close - * the core file. - */ - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((err >= 0), "flushed core file."); - - image_len = H5Fget_file_image(file_id, NULL, (size_t)0); - VRFY((image_len > 0), "got image file size"); - - image_ptr = (void *)malloc((size_t)image_len); - VRFY(image_ptr != NULL, "allocated file image buffer."); - - bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len); - VRFY(bytes_read == image_len, "wrote file into image buffer"); - - err = H5Sclose(space_id); - VRFY((err >= 0), "closed data space."); - - err = H5Dclose(dset_id); - VRFY((err >= 0), "closed data set."); - - err = H5Fclose(file_id); - VRFY((err >= 0), "closed core file(1)."); - - err = H5Pclose(fapl_id); - VRFY((err >= 0), "closed fapl(1)."); - - /* 4) Send the image to process 1. */ - - mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1"); - - mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1"); - - free(image_ptr); - image_ptr = NULL; - image_len = 0; - - /* 5) Await receipt on a file image from process n-1. */ - - mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0, - MPI_COMM_WORLD, &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1"); - - image_ptr = (void *)malloc((size_t)image_len); - VRFY(image_ptr != NULL, "allocated file image receive buffer."); - - mpi_result = - MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1"); - - /* 6) open the image received from process n-1, verify that - * it contains a vector of length equal to mpi_size, and - * that the vector contains (0, 1, 2, ... n-1). 
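Steps 3 through 6 of the daisy chain revolve around serializing an open core file into a memory buffer and reopening that buffer elsewhere. A minimal sketch of the get/set image round trip, assuming file_id is an already-open core-VFD file and <stdlib.h> is available for malloc; the buffer handling and the image_copy.h5 name are illustrative:

    ssize_t len = H5Fget_file_image(file_id, NULL, 0);   /* first call: query the image size */
    void   *buf = malloc((size_t)len);

    H5Fget_file_image(file_id, buf, (size_t)len);         /* second call: copy the image out */

    /* Reopen the image elsewhere: seed a core-VFD fapl with the buffer.  The
     * name given to H5Fopen only identifies the file; nothing is read from
     * disk because the image is supplied up front and backing store is off. */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_core(fapl, (size_t)(64 * 1024), false);
    H5Pset_file_image(fapl, buf, (size_t)len);
    hid_t new_fid = H5Fopen("image_copy.h5", H5F_ACC_RDWR, fapl);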
- */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id >= 0), "creating fapl"); - - err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false); - VRFY((err >= 0), "setting core file driver in fapl."); - - err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); - VRFY((err >= 0), "set file image in fapl."); - - file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id); - VRFY((file_id >= 0), "opened received file image file"); - - dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); - VRFY((dset_id >= 0), "opened data set"); - - dset_type_id = H5Dget_type(dset_id); - VRFY((dset_type_id >= 0), "obtained data set type"); - - tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); - VRFY((tri_result == true), "verified data set type"); - - space_id = H5Dget_space(dset_id); - VRFY((space_id >= 0), "opened data space"); - - space_ndims = H5Sget_simple_extent_ndims(space_id); - VRFY((space_ndims == 1), "verified data space num dims(1)"); - - space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); - VRFY((space_ndims == 1), "verified data space num dims(2)"); - VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); - - vector_ptr = (int *)malloc((size_t)(mpi_size) * sizeof(int)); - VRFY((vector_ptr != NULL), "allocated in memory rep of vector"); - - err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); - VRFY((err >= 0), "read received vector."); - - vector_ok = true; - for (i = 0; i < mpi_size; i++) - if (vector_ptr[i] != i) - vector_ok = false; - VRFY((vector_ok), "verified received vector."); - - free(vector_ptr); - vector_ptr = NULL; - - /* 7) closes the core file and exit. */ - - err = H5Sclose(space_id); - VRFY((err >= 0), "closed data space."); - - err = H5Dclose(dset_id); - VRFY((err >= 0), "closed data set."); - - err = H5Fclose(file_id); - VRFY((err >= 0), "closed core file(1)."); - - err = H5Pclose(fapl_id); - VRFY((err >= 0), "closed fapl(1)."); - - free(image_ptr); - image_ptr = NULL; - image_len = 0; - } - else { - /* 1) Await receipt of file image from process (i - 1). 
*/ - - mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0, - MPI_COMM_WORLD, &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1"); - - image_ptr = (void *)malloc((size_t)image_len); - VRFY(image_ptr != NULL, "allocated file image receive buffer."); - - mpi_result = - MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1"); - - /* 2) Open the image with the core file driver, verify that it - * contains a vector v of length, and that v[j] = j for - * 0 <= j < i, and that v[j] == -1 for i <= j < n - */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id >= 0), "creating fapl"); - - err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false); - VRFY((err >= 0), "setting core file driver in fapl."); - - err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); - VRFY((err >= 0), "set file image in fapl."); - - file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id); - H5Eprint2(H5P_DEFAULT, stderr); - VRFY((file_id >= 0), "opened received file image file"); - - dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); - VRFY((dset_id >= 0), "opened data set"); - - dset_type_id = H5Dget_type(dset_id); - VRFY((dset_type_id >= 0), "obtained data set type"); - - tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); - VRFY((tri_result == true), "verified data set type"); - - space_id = H5Dget_space(dset_id); - VRFY((space_id >= 0), "opened data space"); - - space_ndims = H5Sget_simple_extent_ndims(space_id); - VRFY((space_ndims == 1), "verified data space num dims(1)"); - - space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); - VRFY((space_ndims == 1), "verified data space num dims(2)"); - VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); - - vector_ptr = (int *)malloc((size_t)(mpi_size) * sizeof(int)); - VRFY((vector_ptr != NULL), "allocated in memory rep of vector"); - - err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); - VRFY((err >= 0), "read received vector."); - - vector_ok = true; - for (i = 0; i < mpi_size; i++) { - if (i < mpi_rank) { - if (vector_ptr[i] != i) - vector_ok = false; - } - else { - if (vector_ptr[i] != -1) - vector_ok = false; - } - } - VRFY((vector_ok), "verified received vector."); - - /* 3) Set v[i] = i in the core file. */ - - vector_ptr[mpi_rank] = mpi_rank; - - err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); - VRFY((err >= 0), "wrote modified data to vector."); - - free(vector_ptr); - vector_ptr = NULL; - - /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. 
*/ - - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((err >= 0), "flushed core file."); - - image_len = H5Fget_file_image(file_id, NULL, (size_t)0); - VRFY((image_len > 0), "got (possibly modified) image file len"); - - image_ptr = (void *)realloc((void *)image_ptr, (size_t)image_len); - VRFY(image_ptr != NULL, "re-allocated file image buffer."); - - bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len); - VRFY(bytes_read == image_len, "wrote file into image buffer"); - - mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, - (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size"); - - mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0, - MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size"); - - free(image_ptr); - image_ptr = NULL; - image_len = 0; - - /* 5) close the core file and exit. */ - - err = H5Sclose(space_id); - VRFY((err >= 0), "closed data space."); - - err = H5Dclose(dset_id); - VRFY((err >= 0), "closed data set."); - - err = H5Fclose(file_id); - VRFY((err >= 0), "closed core file(1)."); - - err = H5Pclose(fapl_id); - VRFY((err >= 0), "closed fapl(1)."); - } - - return; - -} /* file_image_daisy_chain_test() */ diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c deleted file mode 100644 index 7275dd98b2a..00000000000 --- a/testpar/API/t_filter_read.c +++ /dev/null @@ -1,532 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * This verifies the correctness of parallel reading of a dataset that has been - * written serially using filters. - */ - -#include "hdf5.h" -#include "testphdf5.h" - -#ifdef H5_HAVE_SZLIB_H -#include "szlib.h" -#endif - -static int mpi_size, mpi_rank; - -/* Chunk sizes */ -#define CHUNK_DIM1 7 -#define CHUNK_DIM2 27 - -/* Sizes of the vertical hyperslabs. Total dataset size is - {HS_DIM1, HS_DIM2 * mpi_size } */ -#define HS_DIM1 200 -#define HS_DIM2 100 - -#ifdef H5_HAVE_FILTER_SZIP - -/*------------------------------------------------------------------------- - * Function: h5_szip_can_encode - * - * Purpose: Retrieve the filter config flags for szip, tell if - * encoder is available. - * - * Return: 1: decode+encode is enabled - * 0: only decode is enabled - * -1: other - *------------------------------------------------------------------------- - */ -int -h5_szip_can_encode(void) -{ - unsigned int filter_config_flags; - - H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags); - if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) { - /* filter present but neither encode nor decode is supported (???) 
*/ - return -1; - } - else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == - H5Z_FILTER_CONFIG_DECODE_ENABLED) { - /* decoder only: read but not write */ - return 0; - } - else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == - H5Z_FILTER_CONFIG_ENCODE_ENABLED) { - /* encoder only: write but not read (???) */ - return -1; - } - else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == - (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) { - return 1; - } - return (-1); -} -#endif /* H5_HAVE_FILTER_SZIP */ - -/*------------------------------------------------------------------------- - * Function: filter_read_internal - * - * Purpose: Tests parallel reading of a 2D dataset written serially using - * filters. During the parallel reading phase, the dataset is - * divided evenly among the processors in vertical hyperslabs. - *------------------------------------------------------------------------- - */ -static void -filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) -{ - hid_t file, dataset; /* HDF5 IDs */ - hid_t access_plist; /* Access property list ID */ - hid_t sid, memspace; /* Dataspace IDs */ - hsize_t size[2]; /* Dataspace dimensions */ - hsize_t hs_offset[2]; /* Hyperslab offset */ - hsize_t hs_size[2]; /* Hyperslab size */ - size_t i, j; /* Local index variables */ - char name[32] = "dataset"; - herr_t hrc; /* Error status */ - int *points = NULL; /* Writing buffer for entire dataset */ - int *check = NULL; /* Reading buffer for selected hyperslab */ - - (void)dset_size; /* silence compiler */ - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* set sizes for dataset and hyperslabs */ - hs_size[0] = size[0] = HS_DIM1; - hs_size[1] = HS_DIM2; - - size[1] = hs_size[1] * (hsize_t)mpi_size; - - hs_offset[0] = 0; - hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank; - - /* Create the data space */ - sid = H5Screate_simple(2, size, NULL); - VRFY(sid >= 0, "H5Screate_simple"); - - /* Create buffers */ - points = (int *)malloc(size[0] * size[1] * sizeof(int)); - VRFY(points != NULL, "malloc"); - - check = (int *)malloc(hs_size[0] * hs_size[1] * sizeof(int)); - VRFY(check != NULL, "malloc"); - - /* Initialize writing buffer with random data */ - for (i = 0; i < size[0]; i++) - for (j = 0; j < size[1]; j++) - points[i * size[1] + j] = (int)(i + j + 7); - - VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability"); - - /* Serial write phase */ - if (MAINPROCESS) { - - file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY(file >= 0, "H5Fcreate"); - - /* Create the dataset */ - dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY(dataset >= 0, "H5Dcreate2"); - - hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points); - VRFY(hrc >= 0, "H5Dwrite"); -#if 0 - *dset_size = H5Dget_storage_size(dataset); - VRFY(*dset_size > 0, "H5Dget_storage_size"); -#endif - - hrc = H5Dclose(dataset); - VRFY(hrc >= 0, "H5Dclose"); - - hrc = H5Fclose(file); - VRFY(hrc >= 0, "H5Fclose"); - } - - MPI_Barrier(MPI_COMM_WORLD); - - /* Parallel read phase */ - /* Set up MPIO file access property lists */ - access_plist = H5Pcreate(H5P_FILE_ACCESS); - VRFY((access_plist >= 0), "H5Pcreate"); - - hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, 
MPI_INFO_NULL); - VRFY((hrc >= 0), "H5Pset_fapl_mpio"); - - /* Open the file */ - file = H5Fopen(filename, H5F_ACC_RDWR, access_plist); - VRFY((file >= 0), "H5Fopen"); - - dataset = H5Dopen2(file, name, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dopen2"); - - hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL); - VRFY(hrc >= 0, "H5Sselect_hyperslab"); - - memspace = H5Screate_simple(2, hs_size, NULL); - VRFY(memspace >= 0, "H5Screate_simple"); - - hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check); - VRFY(hrc >= 0, "H5Dread"); - - /* Check that the values read are the same as the values written */ - for (i = 0; i < hs_size[0]; i++) { - for (j = 0; j < hs_size[1]; j++) { - if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) { - fprintf(stderr, " Read different values than written.\n"); - fprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i), - (unsigned long)(hs_offset[1] + j)); - fprintf(stderr, " At original: %d\n", (int)points[i * size[1] + (size_t)hs_offset[1] + j]); - fprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]); - VRFY(false, ""); - } - } - } -#if 0 - /* Get the storage size of the dataset */ - *dset_size = H5Dget_storage_size(dataset); - VRFY(*dset_size != 0, "H5Dget_storage_size"); -#endif - - /* Clean up objects used for this test */ - hrc = H5Dclose(dataset); - VRFY(hrc >= 0, "H5Dclose"); - - hrc = H5Sclose(sid); - VRFY(hrc >= 0, "H5Sclose"); - - hrc = H5Sclose(memspace); - VRFY(hrc >= 0, "H5Sclose"); - - hrc = H5Pclose(access_plist); - VRFY(hrc >= 0, "H5Pclose"); - - hrc = H5Fclose(file); - VRFY(hrc >= 0, "H5Fclose"); - - free(points); - free(check); - - MPI_Barrier(MPI_COMM_WORLD); -} - -/*------------------------------------------------------------------------- - * Function: test_filter_read - * - * Purpose: Tests parallel reading of datasets written serially using - * several (combinations of) filters. 
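The test_filter_read routine below reads back, in parallel, datasets that were written serially with various filter pipelines. A minimal sketch of building one of those pipelines (shuffle, then deflate, then a Fletcher32 checksum) on a dataset creation property list; the chunk sizes mirror CHUNK_DIM1 and CHUNK_DIM2 from the deleted file and are otherwise arbitrary:

    hsize_t chunk[2] = {7, 27};
    hid_t   dcpl     = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl, 2, chunk); /* filters require a chunked layout */
    H5Pset_shuffle(dcpl);         /* byte-shuffle to help the compressor */
    H5Pset_deflate(dcpl, 6);      /* gzip at level 6 */
    H5Pset_fletcher32(dcpl);      /* checksum; filters run in the order they are added */

    /* ... pass dcpl to H5Dcreate2(), write the dataset serially, then reopen
     *     the file with an MPI-IO fapl and read each rank's hyperslab ... */
    H5Pclose(dcpl);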
- *------------------------------------------------------------------------- - */ - -void -test_filter_read(void) -{ - hid_t dc; /* HDF5 IDs */ - const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ -#if 0 - hsize_t null_size; /* Size of dataset without filters */ -#endif - unsigned chunk_opts; /* Chunk options */ - unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ - herr_t hrc; - const char *filename; -#ifdef H5_HAVE_FILTER_FLETCHER32 - hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ -#endif - -#ifdef H5_HAVE_FILTER_DEFLATE - hsize_t deflate_size; /* Size of dataset with deflate filter */ -#endif /* H5_HAVE_FILTER_DEFLATE */ - -#ifdef H5_HAVE_FILTER_SZIP - hsize_t szip_size; /* Size of dataset with szip filter */ - unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK; - unsigned szip_pixels_per_block = 4; -#endif /* H5_HAVE_FILTER_SZIP */ - -#if 0 - hsize_t shuffle_size; /* Size of dataset with shuffle filter */ -#endif - -#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP) - hsize_t combo_size; /* Size of dataset with multiple filters */ -#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */ - - filename = PARATESTFILE /* GetTestParameters() */; - - if (VERBOSE_MED) - printf("Parallel reading of dataset written with filters %s\n", filename); - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf( - " API functions for basic file, dataset or filter aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - /*---------------------------------------------------------- - * STEP 0: Test without filters. - *---------------------------------------------------------- - */ - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - filter_read_internal(filename, dc, /* &null_size */ NULL); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - - /* Run steps 1-3 both with and without filters disabled on partial chunks */ - for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; - disable_partial_chunk_filters++) { - /* Set chunk options appropriately */ - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_filter"); - - hrc = H5Pget_chunk_opts(dc, &chunk_opts); - VRFY(hrc >= 0, "H5Pget_chunk_opts"); - - if (disable_partial_chunk_filters) - chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; - - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - - /*---------------------------------------------------------- - * STEP 1: Test Fletcher32 Checksum by itself. 
- *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_FLETCHER32 - - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pset_filter"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_filter"); - - hrc = H5Pset_chunk_opts(dc, chunk_opts); - VRFY(hrc >= 0, "H5Pset_chunk_opts"); - - hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL); - VRFY(hrc >= 0, "H5Pset_filter"); - - filter_read_internal(filename, dc, &fletcher32_size); - VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect."); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - -#endif /* H5_HAVE_FILTER_FLETCHER32 */ - - /*---------------------------------------------------------- - * STEP 2: Test deflation by itself. - *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_DEFLATE - - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_chunk_opts(dc, chunk_opts); - VRFY(hrc >= 0, "H5Pset_chunk_opts"); - - hrc = H5Pset_deflate(dc, 6); - VRFY(hrc >= 0, "H5Pset_deflate"); - - filter_read_internal(filename, dc, &deflate_size); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - -#endif /* H5_HAVE_FILTER_DEFLATE */ - - /*---------------------------------------------------------- - * STEP 3: Test szip compression by itself. - *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_SZIP - if (h5_szip_can_encode() == 1) { - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_chunk_opts(dc, chunk_opts); - VRFY(hrc >= 0, "H5Pset_chunk_opts"); - - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc >= 0, "H5Pset_szip"); - - filter_read_internal(filename, dc, &szip_size); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - } -#endif /* H5_HAVE_FILTER_SZIP */ - } /* end for */ - - /*---------------------------------------------------------- - * STEP 4: Test shuffling by itself. - *---------------------------------------------------------- - */ - - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_shuffle(dc); - VRFY(hrc >= 0, "H5Pset_shuffle"); - - filter_read_internal(filename, dc, /* &shuffle_size */ NULL); -#if 0 - VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size."); -#endif - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - - /*---------------------------------------------------------- - * STEP 5: Test shuffle + deflate + checksum in any order. 
- *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_DEFLATE - /* Testing shuffle+deflate+checksum filters (checksum first) */ - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_fletcher32(dc); - VRFY(hrc >= 0, "H5Pset_fletcher32"); - - hrc = H5Pset_shuffle(dc); - VRFY(hrc >= 0, "H5Pset_shuffle"); - - hrc = H5Pset_deflate(dc, 6); - VRFY(hrc >= 0, "H5Pset_deflate"); - - filter_read_internal(filename, dc, &combo_size); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - - /* Testing shuffle+deflate+checksum filters (checksum last) */ - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_shuffle(dc); - VRFY(hrc >= 0, "H5Pset_shuffle"); - - hrc = H5Pset_deflate(dc, 6); - VRFY(hrc >= 0, "H5Pset_deflate"); - - hrc = H5Pset_fletcher32(dc); - VRFY(hrc >= 0, "H5Pset_fletcher32"); - - filter_read_internal(filename, dc, &combo_size); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - -#endif /* H5_HAVE_FILTER_DEFLATE */ - - /*---------------------------------------------------------- - * STEP 6: Test shuffle + szip + checksum in any order. - *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_SZIP - - /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */ - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_fletcher32(dc); - VRFY(hrc >= 0, "H5Pset_fletcher32"); - - hrc = H5Pset_shuffle(dc); - VRFY(hrc >= 0, "H5Pset_shuffle"); - - /* Make sure encoding is enabled */ - if (h5_szip_can_encode() == 1) { - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc >= 0, "H5Pset_szip"); - - filter_read_internal(filename, dc, &combo_size); - } - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - - /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */ - /* Make sure encoding is enabled */ - if (h5_szip_can_encode() == 1) { - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc >= 0, "H5Pcreate"); - - hrc = H5Pset_chunk(dc, 2, chunk_size); - VRFY(hrc >= 0, "H5Pset_chunk"); - - hrc = H5Pset_shuffle(dc); - VRFY(hrc >= 0, "H5Pset_shuffle"); - - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc >= 0, "H5Pset_szip"); - - hrc = H5Pset_fletcher32(dc); - VRFY(hrc >= 0, "H5Pset_fletcher32"); - - filter_read_internal(filename, dc, &combo_size); - - /* Clean up objects used for this test */ - hrc = H5Pclose(dc); - VRFY(hrc >= 0, "H5Pclose"); - } - -#endif /* H5_HAVE_FILTER_SZIP */ -} diff --git a/testpar/API/t_mdset.c b/testpar/API/t_mdset.c deleted file mode 100644 index 7c97898252d..00000000000 --- a/testpar/API/t_mdset.c +++ /dev/null @@ -1,2827 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. 
* - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#include "hdf5.h" -#include "testphdf5.h" - -#if 0 -#include "H5Dprivate.h" -#include "H5private.h" -#endif - -#define DIM 2 -#define SIZE 32 -#define NDATASET 4 -#define GROUP_DEPTH 32 -enum obj_type { is_group, is_dset }; - -static int get_size(void); -static void write_dataset(hid_t, hid_t, hid_t); -static int read_dataset(hid_t, hid_t, hid_t); -static void create_group_recursive(hid_t, hid_t, hid_t, int); -static void recursive_read_group(hid_t, hid_t, hid_t, int); -static void group_dataset_read(hid_t fid, int mpi_rank, int m); -static void write_attribute(hid_t, int, int); -static int read_attribute(hid_t, int, int); -static int check_value(DATATYPE *, DATATYPE *, int); -static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int); - -/* - * The size value computed by this function is used extensively in - * configuring tests for the current number of processes. - * - * This function was created as part of an effort to allow the - * test functions in this file to run on an arbitrary number of - * processors. - * JRM - 8/11/04 - */ - -static int -get_size(void) -{ - int mpi_rank; - int mpi_size; - int size = SIZE; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - if (mpi_size > size) { - if ((mpi_size % 2) == 0) { - size = mpi_size; - } - else { - size = mpi_size + 1; - } - } - - VRFY((mpi_size <= size), "mpi_size <= size"); - VRFY(((size % 2) == 0), "size isn't even"); - - return (size); - -} /* get_size() */ - -/* - * Example of using PHDF5 to create a zero sized dataset. - * - */ -void -zero_dim_dset(void) -{ - int mpi_size, mpi_rank; - const char *filename; - hid_t fid, plist, dcpl, dsid, sid; - hsize_t dim, chunk_dim; - herr_t ret; - int data[1]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* GetTestParameters() */; - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((plist >= 0), "create_faccess_plist succeeded"); - - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - VRFY((fid >= 0), "H5Fcreate succeeded"); - ret = H5Pclose(plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "failed H5Pcreate"); - - /* Set 1 chunk size */ - chunk_dim = 1; - ret = H5Pset_chunk(dcpl, 1, &chunk_dim); - VRFY((ret >= 0), "failed H5Pset_chunk"); - - /* Create 1D dataspace with 0 dim size */ - dim = 0; - sid = H5Screate_simple(1, &dim, NULL); - VRFY((sid >= 0), "failed H5Screate_simple"); - - /* Create chunked dataset */ - dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dsid >= 0), "failed H5Dcreate2"); - - /* write 0 elements from dataset */ - ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); - VRFY((ret >= 0), "failed H5Dwrite"); - - /* Read 0 elements from dataset */ - ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); - VRFY((ret >= 0), "failed 
H5Dread"); - - H5Pclose(dcpl); - H5Dclose(dsid); - H5Sclose(sid); - H5Fclose(fid); -} - -/* - * Example of using PHDF5 to create ndatasets datasets. Each process write - * a slab of array to the file. - */ -void -multiple_dset_write(void) -{ - int i, j, n, mpi_size, mpi_rank, size; - hid_t iof, plist, dataset, memspace, filespace; - hid_t dcpl; /* Dataset creation property list */ - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM]; - hsize_t count[DIM] = {1, 1}; - double *outme = NULL; - double fill = 1.0; /* Fill value */ - char dname[100]; - herr_t ret; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - int ndatasets; - -#if 0 - pt = GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - /* ndatasets = pt->count; */ ndatasets = NDATASETS; - - size = get_size(); - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - outme = malloc((size_t)size * (size_t)size * sizeof(double)); - VRFY((outme != NULL), "malloc succeeded for outme"); - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((plist >= 0), "create_faccess_plist succeeded"); - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - VRFY((iof >= 0), "H5Fcreate succeeded"); - ret = H5Pclose(plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* decide the hyperslab according to process number. */ - get_slab(chunk_origin, chunk_dims, count, file_dims, size); - - memspace = H5Screate_simple(DIM, chunk_dims, NULL); - filespace = H5Screate_simple(DIM, file_dims, NULL); - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - VRFY((ret >= 0), "mdata hyperslab selection"); - - /* Create a dataset creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation property list succeeded"); - - ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); - VRFY((ret >= 0), "set fill-value succeeded"); - - for (n = 0; n < ndatasets; n++) { - snprintf(dname, sizeof(dname), "dataset %d", n); - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset > 0), dname); - - /* calculate data to write */ - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) - outme[(i * size) + j] = n * 1000 + mpi_rank; - - H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); - - H5Dclose(dataset); -#ifdef BARRIER_CHECKS - if (!((n + 1) % 10)) { - printf("created %d datasets\n", n + 1); - MPI_Barrier(MPI_COMM_WORLD); - } -#endif /* BARRIER_CHECKS */ - } - - H5Sclose(filespace); - H5Sclose(memspace); - H5Pclose(dcpl); - H5Fclose(iof); - - free(outme); -} - -/* Example of using PHDF5 to create, write, and read compact dataset. 
- */ -void -compact_dataset(void) -{ - int i, j, mpi_size, mpi_rank, size, err_num = 0; - hid_t iof, plist, dcpl, dxpl, dataset, filespace; - hsize_t file_dims[DIM]; - double *outme; - double *inme; - char dname[] = "dataset"; - herr_t ret; - const char *filename; -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - bool prop_value; -#endif - - size = get_size(); - - for (i = 0; i < DIM; i++) - file_dims[i] = (hsize_t)size; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - outme = malloc((size_t)((size_t)size * (size_t)size * sizeof(double))); - VRFY((outme != NULL), "malloc succeeded for outme"); - - inme = malloc((size_t)size * (size_t)size * sizeof(double)); - VRFY((outme != NULL), "malloc succeeded for inme"); - - filename = PARATESTFILE /* GetTestParameters() */; - VRFY((mpi_size <= size), "mpi_size <= size"); - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - - /* Define data space */ - filespace = H5Screate_simple(DIM, file_dims, NULL); - - /* Create a compact dataset */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation property list succeeded"); - ret = H5Pset_layout(dcpl, H5D_COMPACT); - VRFY((dcpl >= 0), "set property list for compact dataset"); - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); - VRFY((ret >= 0), "set space allocation time for compact dataset"); - - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2 succeeded"); - - /* set up the collective transfer properties list */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), ""); - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Recalculate data to write. Each process writes the same data. */ - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) - outme[(i * size) + j] = (i + j) * 1000; - - ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - H5Pclose(dcpl); - H5Pclose(plist); - H5Dclose(dataset); - H5Sclose(filespace); - H5Fclose(iof); - - /* Open the file and dataset, read and compare the data. 
*/ - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); - VRFY((iof >= 0), "H5Fopen succeeded"); - - /* set up the collective transfer properties list */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), ""); - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - dataset = H5Dopen2(iof, dname, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dopen2 succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; - ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL, - NULL, NULL, NULL, NULL, NULL); - VRFY((ret >= 0), "H5Pinsert2() succeeded"); -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme); - VRFY((ret >= 0), "H5Dread succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - prop_value = false; - ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); - VRFY((ret >= 0), "H5Pget succeeded"); - VRFY((prop_value == false && dxfer_coll_type == DXFER_COLLECTIVE_IO), - "rank 0 Bcast optimization was performed for a compact dataset"); -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - /* Verify data value */ - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) - if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j])) - if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, - outme[(i * size) + j], inme[(i * size) + j]); - - H5Pclose(plist); - H5Pclose(dxpl); - H5Dclose(dataset); - H5Fclose(iof); - free(inme); - free(outme); -} - -/* - * Example of using PHDF5 to create, write, and read dataset and attribute - * of Null dataspace. 
- */ -void -null_dataset(void) -{ - int mpi_size, mpi_rank; - hid_t iof, plist, dxpl, dataset, attr, sid; - unsigned uval = 2; /* Buffer for writing to dataset */ - int val = 1; /* Buffer for writing to attribute */ - hssize_t nelem; - char dname[] = "dataset"; - char attr_name[] = "attribute"; - herr_t ret; - const char *filename; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset, or attribute aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* GetTestParameters() */; - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - - /* Define data space */ - sid = H5Screate(H5S_NULL); - - /* Check that the null dataspace actually has 0 elements */ - nelem = H5Sget_simple_extent_npoints(sid); - VRFY((nelem == 0), "H5Sget_simple_extent_npoints"); - - /* Create a compact dataset */ - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2 succeeded"); - - /* set up the collective transfer properties list */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), ""); - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Write "nothing" to the dataset(with type conversion) */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* Create an attribute for the group */ - attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT); - VRFY((attr >= 0), "H5Acreate2"); - - /* Write "nothing" to the attribute(with type conversion) */ - ret = H5Awrite(attr, H5T_NATIVE_INT, &val); - VRFY((ret >= 0), "H5Awrite"); - - H5Aclose(attr); - H5Dclose(dataset); - H5Pclose(plist); - H5Sclose(sid); - H5Fclose(iof); - - /* Open the file and dataset, read and compare the data. 
*/ - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); - VRFY((iof >= 0), "H5Fopen succeeded"); - - /* set up the collective transfer properties list */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), ""); - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - dataset = H5Dopen2(iof, dname, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dopen2 succeeded"); - - /* Try reading from the dataset(make certain our buffer is unmodified) */ - ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval); - VRFY((ret >= 0), "H5Dread"); - VRFY((uval == 2), "H5Dread"); - - /* Open the attribute for the dataset */ - attr = H5Aopen(dataset, attr_name, H5P_DEFAULT); - VRFY((attr >= 0), "H5Aopen"); - - /* Try reading from the attribute(make certain our buffer is unmodified) */ ret = - H5Aread(attr, H5T_NATIVE_INT, &val); - VRFY((ret >= 0), "H5Aread"); - VRFY((val == 1), "H5Aread"); - - H5Pclose(plist); - H5Pclose(dxpl); - H5Aclose(attr); - H5Dclose(dataset); - H5Fclose(iof); -} - -/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB) - * Actual data is _not_ written to these datasets. Dataspaces are exact - * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over - * the boundary of interest. - */ -void -big_dataset(void) -{ - int mpi_size, mpi_rank; /* MPI info */ - hid_t iof, /* File ID */ - fapl, /* File access property list ID */ - dataset, /* Dataset ID */ - filespace; /* Dataset's dataspace ID */ - hsize_t file_dims[4]; /* Dimensions of dataspace */ - char dname[] = "dataset"; /* Name of dataset */ -#if 0 - MPI_Offset file_size; /* Size of file on disk */ -#endif - herr_t ret; /* Generic return value */ - const char *filename; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - /* Verify MPI_Offset can handle larger than 2GB sizes */ - VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4"); - - filename = PARATESTFILE /* GetTestParameters() */; - - fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl >= 0), "create_faccess_plist succeeded"); - - /* - * Create >2GB HDF5 file - */ - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((iof >= 0), "H5Fcreate succeeded"); - - /* Define dataspace for 2GB dataspace */ - file_dims[0] = 2; - file_dims[1] = 1024; - file_dims[2] = 1024; - file_dims[3] = 1024; - filespace = H5Screate_simple(4, file_dims, NULL); - VRFY((filespace >= 0), "H5Screate_simple succeeded"); - - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2 succeeded"); - - /* Close all file objects */ - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(filespace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(iof); - VRFY((ret >= 0), "H5Fclose succeeded"); - -#if 0 - /* Check 
that file of the correct size was created */ - file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)"); -#endif - - /* - * Create >4GB HDF5 file - */ - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((iof >= 0), "H5Fcreate succeeded"); - - /* Define dataspace for 4GB dataspace */ - file_dims[0] = 4; - file_dims[1] = 1024; - file_dims[2] = 1024; - file_dims[3] = 1024; - filespace = H5Screate_simple(4, file_dims, NULL); - VRFY((filespace >= 0), "H5Screate_simple succeeded"); - - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2 succeeded"); - - /* Close all file objects */ - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(filespace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(iof); - VRFY((ret >= 0), "H5Fclose succeeded"); -#if 0 - /* Check that file of the correct size was created */ - file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)"); -#endif - - /* - * Create >8GB HDF5 file - */ - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((iof >= 0), "H5Fcreate succeeded"); - - /* Define dataspace for 8GB dataspace */ - file_dims[0] = 8; - file_dims[1] = 1024; - file_dims[2] = 1024; - file_dims[3] = 1024; - filespace = H5Screate_simple(4, file_dims, NULL); - VRFY((filespace >= 0), "H5Screate_simple succeeded"); - - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2 succeeded"); - - /* Close all file objects */ - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(filespace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(iof); - VRFY((ret >= 0), "H5Fclose succeeded"); -#if 0 - /* Check that file of the correct size was created */ - file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)"); -#endif - - /* Close fapl */ - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); -} - -/* Example of using PHDF5 to read a partial written dataset. The dataset does - * not have actual data written to the entire raw data area and relies on the - * default fill value of zeros to work correctly. 
- */ -void -dataset_fillvalue(void) -{ - int mpi_size, mpi_rank; /* MPI info */ - int err_num; /* Number of errors */ - hid_t iof, /* File ID */ - fapl, /* File access property list ID */ - dxpl, /* Data transfer property list ID */ - dataset, /* Dataset ID */ - memspace, /* Memory dataspace ID */ - filespace; /* Dataset's dataspace ID */ - char dname[] = "dataset"; /* Name of dataset */ - hsize_t dset_dims[4] = {0, 6, 7, 8}; - hsize_t req_start[4] = {0, 0, 0, 0}; - hsize_t req_count[4] = {1, 6, 7, 8}; - hsize_t dset_size; /* Dataset size */ - int *rdata, *wdata; /* Buffers for data to read and write */ - int *twdata, *trdata; /* Temporary pointer into buffer */ - int acc, i, ii, j, k, l; /* Local index variables */ - herr_t ret; /* Generic return value */ - const char *filename; -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - bool prop_value; -#endif - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - filename = PARATESTFILE /* GetTestParameters() */; - - /* Set the dataset dimension to be one row more than number of processes */ - /* and calculate the actual dataset size. */ - dset_dims[0] = (hsize_t)(mpi_size + 1); - dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3]; - - /* Allocate space for the buffers */ - rdata = malloc((size_t)(dset_size * sizeof(int))); - VRFY((rdata != NULL), "calloc succeeded for read buffer"); - wdata = malloc((size_t)(dset_size * sizeof(int))); - VRFY((wdata != NULL), "malloc succeeded for write buffer"); - - fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl >= 0), "create_faccess_plist succeeded"); - - /* - * Create HDF5 file - */ - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((iof >= 0), "H5Fcreate succeeded"); - - filespace = H5Screate_simple(4, dset_dims, NULL); - VRFY((filespace >= 0), "File H5Screate_simple succeeded"); - - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2 succeeded"); - - memspace = H5Screate_simple(4, dset_dims, NULL); - VRFY((memspace >= 0), "Memory H5Screate_simple succeeded"); - - /* - * Read dataset before any data is written. 
- */ - - /* Create DXPL for I/O */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), "H5Pcreate succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; - ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL, - NULL, NULL, NULL, NULL, NULL); - VRFY((ret >= 0), "testing property list inserted succeeded"); -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - for (ii = 0; ii < 2; ii++) { - - if (ii == 0) - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); - else - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* set entire read buffer with the constant 2 */ - memset(rdata, 2, (size_t)(dset_size * sizeof(int))); - - /* Read the entire dataset back */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata); - VRFY((ret >= 0), "H5Dread succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - prop_value = false; - ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); - VRFY((ret >= 0), "testing property list get succeeded"); - if (ii == 0) - VRFY((prop_value == false), "correctly handled rank 0 Bcast"); - else - VRFY((prop_value == true), "correctly handled rank 0 Bcast"); -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - /* Verify all data read are the fill value 0 */ - trdata = rdata; - err_num = 0; - for (i = 0; i < (int)dset_dims[0]; i++) - for (j = 0; j < (int)dset_dims[1]; j++) - for (k = 0; k < (int)dset_dims[2]; k++) - for (l = 0; l < (int)dset_dims[3]; l++, trdata++) - if (*trdata != 0) - if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf( - "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", - mpi_rank, i, j, k, l, *trdata); - if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("Rank %d: [more errors ...]\n", mpi_rank); - if (err_num) { - printf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num); - nerrors++; - } - } - - /* Barrier to ensure all processes have completed the above test. */ - MPI_Barrier(MPI_COMM_WORLD); - - /* - * Each process writes 1 row of data. Thus last row is not written. - */ - /* Create hyperslabs in memory and file dataspaces */ - req_start[0] = (hsize_t)mpi_rank; - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); - VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); - ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); - VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); - - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Fill write buffer with some values */ - twdata = wdata; - for (i = 0, acc = 0; i < (int)dset_dims[0]; i++) - for (j = 0; j < (int)dset_dims[1]; j++) - for (k = 0; k < (int)dset_dims[2]; k++) - for (l = 0; l < (int)dset_dims[3]; l++) - *twdata++ = acc++; - - /* Collectively write a hyperslab of data to the dataset */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* Barrier here, to allow processes to sync */ - MPI_Barrier(MPI_COMM_WORLD); - - /* - * Read dataset after partial write. 
- */ - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; - ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); - VRFY((ret >= 0), " H5Pset succeeded"); -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - for (ii = 0; ii < 2; ii++) { - - if (ii == 0) - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); - else - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* set entire read buffer with the constant 2 */ - memset(rdata, 2, (size_t)(dset_size * sizeof(int))); - - /* Read the entire dataset back */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata); - VRFY((ret >= 0), "H5Dread succeeded"); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - prop_value = false; - ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); - VRFY((ret >= 0), "testing property list get succeeded"); - if (ii == 0) - VRFY((prop_value == false), "correctly handled rank 0 Bcast"); - else - VRFY((prop_value == true), "correctly handled rank 0 Bcast"); -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - - /* Verify correct data read */ - twdata = wdata; - trdata = rdata; - err_num = 0; - for (i = 0; i < (int)dset_dims[0]; i++) - for (j = 0; j < (int)dset_dims[1]; j++) - for (k = 0; k < (int)dset_dims[2]; k++) - for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++) - if (i < mpi_size) { - if (*twdata != *trdata) - if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", - i, j, k, l, *twdata, *trdata); - } /* end if */ - else { - if (*trdata != 0) - if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, - j, k, l, *trdata); - } /* end else */ - if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if (err_num) { - printf("%d errors found in check_value\n", err_num); - nerrors++; - } - } - - /* Close all file objects */ - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(filespace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(iof); - VRFY((ret >= 0), "H5Fclose succeeded"); - - /* Close memory dataspace */ - ret = H5Sclose(memspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - - /* Close dxpl */ - ret = H5Pclose(dxpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* Close fapl */ - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* free the buffers */ - free(rdata); - free(wdata); -} - -/* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ -void -collective_group_write_independent_group_read(void) -{ - collective_group_write(); - independent_group_read(); -} - -/* Write multiple groups with a chunked dataset in each group collectively. - * These groups and datasets are for testing independent read later. 
- */ -void -collective_group_write(void) -{ - int mpi_rank, mpi_size, size; - int i, j, m; - char gname[64], dname[32]; - hid_t fid, gid, did, plist, dcpl, memspace, filespace; - DATATYPE *outme = NULL; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; - hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */ - herr_t ret1, ret2; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - int ngroups; - -#if 0 - pt = GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - /* ngroups = pt->count; */ ngroups = NGROUPS; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf( - " API functions for basic file, group, or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - size = get_size(); - - chunk_size[0] = (hsize_t)(size / 2); - chunk_size[1] = (hsize_t)(size / 2); - - outme = malloc((size_t)size * (size_t)size * sizeof(DATATYPE)); - VRFY((outme != NULL), "malloc succeeded for outme"); - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - VRFY((fid >= 0), "H5Fcreate"); - H5Pclose(plist); - - /* decide the hyperslab according to process number. */ - get_slab(chunk_origin, chunk_dims, count, file_dims, size); - - /* select hyperslab in memory and file spaces. These two operations are - * identical since the datasets are the same. */ - memspace = H5Screate_simple(DIM, file_dims, NULL); - ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - filespace = H5Screate_simple(DIM, file_dims, NULL); - ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - VRFY((memspace >= 0), "memspace"); - VRFY((filespace >= 0), "filespace"); - VRFY((ret1 == 0), "mgroup memspace selection"); - VRFY((ret2 == 0), "mgroup filespace selection"); - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - ret1 = H5Pset_chunk(dcpl, 2, chunk_size); - VRFY((dcpl >= 0), "dataset creation property"); - VRFY((ret1 == 0), "set chunk for dataset creation property"); - - /* creates ngroups groups under the root group, writes chunked - * datasets in parallel. 
*/ - for (m = 0; m < ngroups; m++) { - snprintf(gname, sizeof(gname), "group%d", m); - gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((gid > 0), gname); - - snprintf(dname, sizeof(dname), "dataset%d", m); - did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((did > 0), dname); - - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) - outme[(i * size) + j] = (i + j) * 1000 + mpi_rank; - - ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); - VRFY((ret1 == 0), "H5Dwrite"); - - ret1 = H5Dclose(did); - VRFY((ret1 == 0), "H5Dclose"); - - ret1 = H5Gclose(gid); - VRFY((ret1 == 0), "H5Gclose"); - -#ifdef BARRIER_CHECKS - if (!((m + 1) % 10)) { - printf("created %d groups\n", m + 1); - MPI_Barrier(MPI_COMM_WORLD); - } -#endif /* BARRIER_CHECKS */ - } - - H5Pclose(dcpl); - H5Sclose(filespace); - H5Sclose(memspace); - - ret1 = H5Fclose(fid); - VRFY((ret1 == 0), "H5Fclose"); - - free(outme); -} - -/* Let two sets of processes open and read different groups and chunked - * datasets independently. - */ -void -independent_group_read(void) -{ - int mpi_rank, m; - hid_t plist, fid; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - int ngroups; - herr_t ret; - -#if 0 - pt = GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - /* ngroups = pt->count; */ ngroups = NGROUPS; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf( - " API functions for basic file, group, or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - H5Pset_all_coll_metadata_ops(plist, false); - - fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); - VRFY((fid > 0), "H5Fopen"); - H5Pclose(plist); - - /* open groups and read datasets. Odd number processes read even number - * groups from the end; even number processes read odd number groups - * from the beginning. */ - if (mpi_rank % 2 == 0) { - for (m = ngroups - 1; m == 0; m -= 2) - group_dataset_read(fid, mpi_rank, m); - } - else { - for (m = 0; m < ngroups; m += 2) - group_dataset_read(fid, mpi_rank, m); - } - - ret = H5Fclose(fid); - VRFY((ret == 0), "H5Fclose"); -} - -/* Open and read datasets and compare data - */ -static void -group_dataset_read(hid_t fid, int mpi_rank, int m) -{ - int ret, i, j, size; - char gname[64], dname[32]; - hid_t gid, did; - DATATYPE *outdata = NULL; - DATATYPE *indata = NULL; - - size = get_size(); - - indata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE)); - VRFY((indata != NULL), "malloc succeeded for indata"); - - outdata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE)); - VRFY((outdata != NULL), "malloc succeeded for outdata"); - - /* open every group under root group. */ - snprintf(gname, sizeof(gname), "group%d", m); - gid = H5Gopen2(fid, gname, H5P_DEFAULT); - VRFY((gid > 0), gname); - - /* check the data. 
*/ - snprintf(dname, sizeof(dname), "dataset%d", m); - did = H5Dopen2(gid, dname, H5P_DEFAULT); - VRFY((did > 0), dname); - - H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata); - - /* this is the original value */ - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) - outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank; - - /* compare the original value(outdata) to the value in file(indata).*/ - ret = check_value(indata, outdata, size); - VRFY((ret == 0), "check the data"); - - ret = H5Dclose(did); - VRFY((ret == 0), "H5Dclose"); - ret = H5Gclose(gid); - VRFY((ret == 0), "H5Gclose"); - - free(indata); - free(outdata); -} - -/* - * Example of using PHDF5 to create multiple groups. Under the root group, - * it creates ngroups groups. Under the first group just created, it creates - * recursive subgroups of depth GROUP_DEPTH. In each created group, it - * generates NDATASETS datasets. Each process write a hyperslab of an array - * into the file. The structure is like - * - * root group - * | - * ---------------------------- ... ... ------------------------ - * | | | ... ... | | - * group0*+' group1*+' group2*+' ... ... group ngroups*+' - * | - * 1st_child_group*' - * | - * 2nd_child_group*' - * | - * : - * : - * | - * GROUP_DEPTHth_child_group*' - * - * * means the group has dataset(s). - * + means the group has attribute(s). - * ' means the datasets in the groups have attribute(s). - * - */ -void -multiple_group_write(void) -{ - int mpi_rank, mpi_size, size; - int m; - char gname[64]; - hid_t fid, gid, plist, memspace, filespace; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; - herr_t ret; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - int ngroups; - -#if 0 - pt = GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - /* ngroups = pt->count; */ ngroups = NGROUPS; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, group, dataset, or attribute aren't supported with " - "this connector\n"); - fflush(stdout); - } - - return; - } - - size = get_size(); - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - H5Pclose(plist); - - /* decide the hyperslab according to process number. */ - get_slab(chunk_origin, chunk_dims, count, file_dims, size); - - /* select hyperslab in memory and file spaces. These two operations are - * identical since the datasets are the same. */ - memspace = H5Screate_simple(DIM, file_dims, NULL); - VRFY((memspace >= 0), "memspace"); - ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - VRFY((ret >= 0), "mgroup memspace selection"); - - filespace = H5Screate_simple(DIM, file_dims, NULL); - VRFY((filespace >= 0), "filespace"); - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - VRFY((ret >= 0), "mgroup filespace selection"); - - /* creates ngroups groups under the root group, writes datasets in - * parallel. 
*/ - for (m = 0; m < ngroups; m++) { - snprintf(gname, sizeof(gname), "group%d", m); - gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((gid > 0), gname); - - /* create attribute for these groups. */ - write_attribute(gid, is_group, m); - - if (m != 0) - write_dataset(memspace, filespace, gid); - - H5Gclose(gid); - -#ifdef BARRIER_CHECKS - if (!((m + 1) % 10)) { - printf("created %d groups\n", m + 1); - MPI_Barrier(MPI_COMM_WORLD); - } -#endif /* BARRIER_CHECKS */ - } - - /* recursively creates subgroups under the first group. */ - gid = H5Gopen2(fid, "group0", H5P_DEFAULT); - create_group_recursive(memspace, filespace, gid, 0); - ret = H5Gclose(gid); - VRFY((ret >= 0), "H5Gclose"); - - ret = H5Sclose(filespace); - VRFY((ret >= 0), "H5Sclose"); - ret = H5Sclose(memspace); - VRFY((ret >= 0), "H5Sclose"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose"); -} - -/* - * In a group, creates NDATASETS datasets. Each process writes a hyperslab - * of a data array to the file. - */ -static void -write_dataset(hid_t memspace, hid_t filespace, hid_t gid) -{ - int i, j, n, size; - int mpi_rank, mpi_size; - char dname[32]; - DATATYPE *outme = NULL; - hid_t did; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - size = get_size(); - - outme = malloc((size_t)size * (size_t)size * sizeof(double)); - VRFY((outme != NULL), "malloc succeeded for outme"); - - for (n = 0; n < NDATASET; n++) { - snprintf(dname, sizeof(dname), "dataset%d", n); - did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((did > 0), dname); - - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) - outme[(i * size) + j] = n * 1000 + mpi_rank; - - H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); - - /* create attribute for these datasets.*/ - write_attribute(did, is_dset, n); - - H5Dclose(did); - } - free(outme); -} - -/* - * Creates subgroups of depth GROUP_DEPTH recursively. Also writes datasets - * in parallel in each group. - */ -static void -create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter) -{ - hid_t child_gid; - int mpi_rank; - char gname[64]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - -#ifdef BARRIER_CHECKS - if (!((counter + 1) % 10)) { - printf("created %dth child groups\n", counter + 1); - MPI_Barrier(MPI_COMM_WORLD); - } -#endif /* BARRIER_CHECKS */ - - snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1); - child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((child_gid > 0), gname); - - /* write datasets in parallel. */ - write_dataset(memspace, filespace, gid); - - if (counter < GROUP_DEPTH) - create_group_recursive(memspace, filespace, child_gid, counter + 1); - - H5Gclose(child_gid); -} - -/* - * This function is to verify the data from multiple group testing. It opens - * every dataset in every group and check their correctness. 
- */ -void -multiple_group_read(void) -{ - int mpi_rank, mpi_size, error_num, size; - int m; - char gname[64]; - hid_t plist, fid, gid, memspace, filespace; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - int ngroups; - -#if 0 - pt = GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - /* ngroups = pt->count; */ ngroups = NGROUPS; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, group, dataset, or attribute aren't supported with " - "this connector\n"); - fflush(stdout); - } - - return; - } - - size = get_size(); - - plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); - H5Pclose(plist); - - /* decide hyperslab for each process */ - get_slab(chunk_origin, chunk_dims, count, file_dims, size); - - /* select hyperslab for memory and file space */ - memspace = H5Screate_simple(DIM, file_dims, NULL); - H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - filespace = H5Screate_simple(DIM, file_dims, NULL); - H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - - /* open every group under root group. */ - for (m = 0; m < ngroups; m++) { - snprintf(gname, sizeof(gname), "group%d", m); - gid = H5Gopen2(fid, gname, H5P_DEFAULT); - VRFY((gid > 0), gname); - - /* check the data. */ - if (m != 0) - if ((error_num = read_dataset(memspace, filespace, gid)) > 0) - nerrors += error_num; - - /* check attribute.*/ - error_num = 0; - if ((error_num = read_attribute(gid, is_group, m)) > 0) - nerrors += error_num; - - H5Gclose(gid); - -#ifdef BARRIER_CHECKS - if (!((m + 1) % 10)) - MPI_Barrier(MPI_COMM_WORLD); -#endif /* BARRIER_CHECKS */ - } - - /* open all the groups in vertical direction. */ - gid = H5Gopen2(fid, "group0", H5P_DEFAULT); - VRFY((gid > 0), "group0"); - recursive_read_group(memspace, filespace, gid, 0); - H5Gclose(gid); - - H5Sclose(filespace); - H5Sclose(memspace); - H5Fclose(fid); -} - -/* - * This function opens all the datasets in a certain, checks the data using - * dataset_vrfy function. 
- */ -static int -read_dataset(hid_t memspace, hid_t filespace, hid_t gid) -{ - int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0; - char dname[32]; - DATATYPE *outdata = NULL, *indata = NULL; - hid_t did; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - size = get_size(); - - indata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE)); - VRFY((indata != NULL), "malloc succeeded for indata"); - - outdata = (DATATYPE *)malloc((size_t)size * (size_t)size * sizeof(DATATYPE)); - VRFY((outdata != NULL), "malloc succeeded for outdata"); - - for (n = 0; n < NDATASET; n++) { - snprintf(dname, sizeof(dname), "dataset%d", n); - did = H5Dopen2(gid, dname, H5P_DEFAULT); - VRFY((did > 0), dname); - - H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata); - - /* this is the original value */ - for (i = 0; i < size; i++) - for (j = 0; j < size; j++) { - *outdata = n * 1000 + mpi_rank; - outdata++; - } - outdata -= size * size; - - /* compare the original value(outdata) to the value in file(indata).*/ - vrfy_errors = check_value(indata, outdata, size); - - /* check attribute.*/ - if ((attr_errors = read_attribute(did, is_dset, n)) > 0) - vrfy_errors += attr_errors; - - H5Dclose(did); - } - - free(indata); - free(outdata); - - return vrfy_errors; -} - -/* - * This recursive function opens all the groups in vertical direction and - * checks the data. - */ -static void -recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter) -{ - hid_t child_gid; - int mpi_rank, err_num = 0; - char gname[64]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); -#ifdef BARRIER_CHECKS - if ((counter + 1) % 10) - MPI_Barrier(MPI_COMM_WORLD); -#endif /* BARRIER_CHECKS */ - - if ((err_num = read_dataset(memspace, filespace, gid))) - nerrors += err_num; - - if (counter < GROUP_DEPTH) { - snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1); - child_gid = H5Gopen2(gid, gname, H5P_DEFAULT); - VRFY((child_gid > 0), gname); - recursive_read_group(memspace, filespace, child_gid, counter + 1); - H5Gclose(child_gid); - } -} - -/* Create and write attribute for a group or a dataset. For groups, attribute - * is a scalar datum; for dataset, it is a one-dimensional array. - */ -static void -write_attribute(hid_t obj_id, int this_type, int num) -{ - hid_t sid, aid; - hsize_t dspace_dims[1] = {8}; - int i, mpi_rank, attr_data[8], dspace_rank = 1; - char attr_name[32]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - if (this_type == is_group) { - snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num); - sid = H5Screate(H5S_SCALAR); - aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - H5Awrite(aid, H5T_NATIVE_INT, &num); - H5Aclose(aid); - H5Sclose(sid); - } /* end if */ - else if (this_type == is_dset) { - snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num); - for (i = 0; i < 8; i++) - attr_data[i] = i; - sid = H5Screate_simple(dspace_rank, dspace_dims, NULL); - aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - H5Awrite(aid, H5T_NATIVE_INT, attr_data); - H5Aclose(aid); - H5Sclose(sid); - } /* end else-if */ -} - -/* Read and verify attribute for group or dataset. 
*/ -static int -read_attribute(hid_t obj_id, int this_type, int num) -{ - hid_t aid; - hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8}; - int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0; - char attr_name[32]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - if (this_type == is_group) { - snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num); - aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); - H5Aread(aid, H5T_NATIVE_INT, &in_num); - vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num); - H5Aclose(aid); - } - else if (this_type == is_dset) { - snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num); - for (i = 0; i < 8; i++) - out_data[i] = i; - aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); - H5Aread(aid, H5T_NATIVE_INT, in_data); - vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data); - H5Aclose(aid); - } - - return vrfy_errors; -} - -/* This functions compares the original data with the read-in data for its - * hyperslab part only by process ID. - */ -static int -check_value(DATATYPE *indata, DATATYPE *outdata, int size) -{ - int mpi_rank, mpi_size, err_num = 0; - hsize_t i, j; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], count[DIM]; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - get_slab(chunk_origin, chunk_dims, count, NULL, size); - - indata += chunk_origin[0] * (hsize_t)size; - outdata += chunk_origin[0] * (hsize_t)size; - for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++) - for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) { - if (*indata != *outdata) - if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata, - *indata); - } - if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if (err_num) - printf("%d errors found in check_value\n", err_num); - return err_num; -} - -/* Decide the portion of data chunk in dataset by process ID. - */ - -static void -get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size) -{ - int mpi_rank, mpi_size; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - if (chunk_origin != NULL) { - chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size); - chunk_origin[1] = 0; - } - if (chunk_dims != NULL) { - chunk_dims[0] = (hsize_t)(size / mpi_size); - chunk_dims[1] = (hsize_t)size; - } - if (file_dims != NULL) - file_dims[0] = file_dims[1] = (hsize_t)size; - if (count != NULL) - count[0] = count[1] = 1; -} - -/* - * This function is based on bug demonstration code provided by Thomas - * Guignon(thomas.guignon@ifp.fr), and is intended to verify the - * correctness of my fix for that bug. - * - * In essence, the bug appeared when at least one process attempted to - * write a point selection -- for which collective I/O is not supported, - * and at least one other attempted to write some other type of selection - * for which collective I/O is supported. - * - * Since the processes did not compare notes before performing the I/O, - * some would attempt collective I/O while others performed independent - * I/O. A hang resulted. - * - * This function reproduces this situation. At present the test hangs - * on failure. 
- * JRM - 9/13/04 - */ - -#define N 4 - -void -io_mode_confusion(void) -{ - /* - * HDF5 APIs definitions - */ - - const int rank = 1; - const char *dataset_name = "IntArray"; - - hid_t file_id, dset_id; /* file and dataset identifiers */ - hid_t filespace, memspace; /* file and memory dataspace */ - /* identifiers */ - hsize_t dimsf[1]; /* dataset dimensions */ - int data[N] = {1}; /* pointer to data buffer to write */ - hsize_t coord[N] = {0L, 1L, 2L, 3L}; - hid_t plist_id; /* property list identifier */ - herr_t status; - - /* - * MPI variables - */ - - int mpi_size, mpi_rank; - - /* - * test bed related variables - */ - - const char *fcn_name = "io_mode_confusion"; - const bool verbose = false; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - -#if 0 - pt = GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - /* - * Set up file access property list with parallel I/O access - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); - - plist_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((plist_id != -1), "H5Pcreate() failed"); - - status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); - VRFY((status >= 0), "H5Pset_fapl_mpio() failed"); - - /* - * Create a new file collectively and release property list identifier. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); - VRFY((file_id >= 0), "H5Fcreate() failed"); - - status = H5Pclose(plist_id); - VRFY((status >= 0), "H5Pclose() failed"); - - /* - * Create the dataspace for the dataset. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name); - - dimsf[0] = N; - filespace = H5Screate_simple(rank, dimsf, NULL); - VRFY((filespace >= 0), "H5Screate_simple() failed."); - - /* - * Create the dataset with default properties and close filespace. 
- */ - - if (verbose) - fprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name); - - dset_id = - H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate2() failed"); - - status = H5Sclose(filespace); - VRFY((status >= 0), "H5Sclose() failed"); - - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name); - - memspace = H5Screate_simple(rank, dimsf, NULL); - VRFY((memspace >= 0), "H5Screate_simple() failed."); - - if (mpi_rank == 0) { - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name); - - status = H5Sselect_all(memspace); - VRFY((status >= 0), "H5Sselect_all() failed"); - } - else { - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name); - - status = H5Sselect_none(memspace); - VRFY((status >= 0), "H5Sselect_none() failed"); - } - - if (verbose) - fprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); - - MPI_Barrier(MPI_COMM_WORLD); - - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name); - - filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0), "H5Dget_space() failed"); - - /* select all */ - if (mpi_rank == 0) { - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name); - - status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord); - VRFY((status >= 0), "H5Sselect_elements() failed"); - } - else { /* select nothing */ - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name); - - status = H5Sselect_none(filespace); - VRFY((status >= 0), "H5Sselect_none() failed"); - } - - if (verbose) - fprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); - - MPI_Barrier(MPI_COMM_WORLD); - - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name); - - plist_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((plist_id != -1), "H5Pcreate() failed"); - - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name); - - status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); - VRFY((status >= 0), "H5Pset_dxpl_mpio() failed"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((status >= 0), "set independent IO collectively succeeded"); - } - - if (verbose) - fprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name); - - status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data); - - if (verbose) - fprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status); - VRFY((status >= 0), "H5Dwrite() failed"); - - /* - * Close/release resources. 
- */ - - if (verbose) - fprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name); - - status = H5Dclose(dset_id); - VRFY((status >= 0), "H5Dclose() failed"); - - status = H5Sclose(filespace); - VRFY((status >= 0), "H5Dclose() failed"); - - status = H5Sclose(memspace); - VRFY((status >= 0), "H5Sclose() failed"); - - status = H5Pclose(plist_id); - VRFY((status >= 0), "H5Pclose() failed"); - - status = H5Fclose(file_id); - VRFY((status >= 0), "H5Fclose() failed"); - - if (verbose) - fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); - - return; - -} /* io_mode_confusion() */ - -#undef N - -/* - * At present, the object header code maintains an image of its on disk - * representation, which is updates as necessary instead of generating on - * request. - * - * Prior to the fix that this test in designed to verify, the image of the - * on disk representation was only updated on flush -- not when the object - * header was marked clean. - * - * This worked perfectly well as long as all writes of a given object - * header were written from a single process. However, with the implementation - * of round robin metadata data writes in parallel HDF5, this is no longer - * the case -- it is possible for a given object header to be flushed from - * several different processes, with the object header simply being marked - * clean in all other processes on each flush. This resulted in NULL or - * out of data object header information being written to disk. - * - * To repair this, I modified the object header code to update its - * on disk image both on flush on when marked clean. - * - * This test is directed at verifying that the fix performs as expected. - * - * The test functions by creating a HDF5 file with several small datasets, - * and then flushing the file. This should result of at least one of - * the associated object headers being flushed by a process other than - * process 0. - * - * Then for each data set, add an attribute and flush the file again. - * - * Close the file and re-open it. - * - * Open the each of the data sets in turn. If all opens are successful, - * the test passes. Otherwise the test fails. - * - * Note that this test will probably become irrelevant shortly, when we - * land the journaling modifications on the trunk -- at which point all - * cache clients will have to construct on disk images on demand. - * - * JRM -- 10/13/10 - */ - -#define NUM_DATA_SETS 4 -#define LOCAL_DATA_SIZE 4 -#define LARGE_ATTR_SIZE 256 -/* Since all even and odd processes are split into writer and reader comm - * respectively, process 0 and 1 in COMM_WORLD become the root process of - * the writer and reader comm respectively. - */ -#define Writer_Root 0 -#define Reader_Root 1 -#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD) -#define Reader_result(mpi_err, xsteps_done) \ - mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD) -#define Reader_check(mpi_err, xsteps, xsteps_done) \ - { \ - Reader_wait(mpi_err, xsteps); \ - Reader_result(mpi_err, xsteps_done); \ - } - -/* object names used by both rr_obj_hdr_flush_confusion and - * rr_obj_hdr_flush_confusion_reader. 
- */ -const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"}; -const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"}; -const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2", - "large_attribute_3"}; - -void -rr_obj_hdr_flush_confusion(void) -{ - /* MPI variables */ - /* private communicator size and rank */ - int mpi_size; - int mpi_rank; - int mrc; /* mpi error code */ - int is_reader; /* 1 for reader process; 0 for writer process. */ - MPI_Comm comm; - - /* test bed related variables */ - const char *fcn_name = "rr_obj_hdr_flush_confusion"; - const bool verbose = false; - - /* Create two new private communicators from MPI_COMM_WORLD. - * Even and odd ranked processes go to comm_writers and comm_readers - * respectively. - */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or " - "file flush aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - assert(mpi_size > 2); - - is_reader = mpi_rank % 2; - mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split"); - - /* The reader processes branches off to do reading - * while the writer processes continues to do writing - * Whenever writers finish one writing step, including a H5Fflush, - * they inform the readers, via MPI_COMM_WORLD, to verify. - * They will wait for the result from the readers before doing the next - * step. When all steps are done, they inform readers to end. 
- */ - if (is_reader) - rr_obj_hdr_flush_confusion_reader(comm); - else - rr_obj_hdr_flush_confusion_writer(comm); - - MPI_Comm_free(&comm); - if (verbose) - fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); - - return; - -} /* rr_obj_hdr_flush_confusion() */ - -void -rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) -{ - int i; - int j; - hid_t file_id = -1; - hid_t fapl_id = -1; - hid_t dxpl_id = -1; - hid_t att_id[NUM_DATA_SETS]; - hid_t att_space[NUM_DATA_SETS]; - hid_t lg_att_id[NUM_DATA_SETS]; - hid_t lg_att_space[NUM_DATA_SETS]; - hid_t disk_space[NUM_DATA_SETS]; - hid_t mem_space[NUM_DATA_SETS]; - hid_t dataset[NUM_DATA_SETS]; - hsize_t att_size[1]; - hsize_t lg_att_size[1]; - hsize_t disk_count[1]; - hsize_t disk_size[1]; - hsize_t disk_start[1]; - hsize_t mem_count[1]; - hsize_t mem_size[1]; - hsize_t mem_start[1]; - herr_t err; - double data[LOCAL_DATA_SIZE]; - double att[LOCAL_DATA_SIZE]; - double lg_att[LARGE_ATTR_SIZE]; - - /* MPI variables */ - /* world communication size and rank */ - int mpi_world_size; - int mpi_world_rank; - /* private communicator size and rank */ - int mpi_size; - int mpi_rank; - int mrc; /* mpi error code */ - /* steps to verify and have been verified */ - int steps = 0; - int steps_done = 0; - - /* test bed related variables */ - const char *fcn_name = "rr_obj_hdr_flush_confusion_writer"; - const bool verbose = false; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - - /* - * setup test bed related variables: - */ - -#if 0 - pt = (const H5Ptest_param_t *)GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size); - MPI_Comm_rank(comm, &mpi_rank); - MPI_Comm_size(comm, &mpi_size); - - /* - * Set up file access property list with parallel I/O access - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); - - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); - - err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); - VRFY((err >= 0), "H5Pset_fapl_mpio() failed"); - - /* - * Create a new file collectively and release property list identifier. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename); - - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "H5Fcreate() failed"); - - err = H5Pclose(fapl_id); - VRFY((err >= 0), "H5Pclose(fapl_id) failed"); - - /* - * Step 1: create the data sets and write data. 
- */ - - if (verbose) - fprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name); - - disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size); - mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - - for (i = 0; i < NUM_DATA_SETS; i++) { - - disk_space[i] = H5Screate_simple(1, disk_size, NULL); - VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n"); - - dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT, - H5P_DEFAULT, H5P_DEFAULT); - - VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n"); - } - - /* - * setup data transfer property list - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); - - err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); - VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); - - /* - * write data to the data sets - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name); - - disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); - mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - mem_start[0] = (hsize_t)(0); - - for (j = 0; j < LOCAL_DATA_SIZE; j++) { - data[j] = (double)(mpi_rank + 1); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); - mem_space[i] = H5Screate_simple(1, mem_size, NULL); - VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); - err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); - err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data); - VRFY((err >= 0), "H5Dwrite(1) failed.\n"); - for (j = 0; j < LOCAL_DATA_SIZE; j++) - data[j] *= 10.0; - } - - /* - * close the data spaces - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Sclose(disk_space[i]); - VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); - err = H5Sclose(mem_space[i]); - VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); - } - - /* End of Step 1: create the data sets and write data. */ - - /* - * flush the metadata cache - */ - - if (verbose) - fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((err >= 0), "H5Fflush(1) failed.\n"); - - /* Tell the reader to check the file up to steps. 
*/ - steps++; - Reader_check(mrc, steps, steps_done); - VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - - /* - * Step 2: write attributes to each dataset - */ - - if (verbose) - fprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name); - - att_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - for (j = 0; j < LOCAL_DATA_SIZE; j++) { - att[j] = (double)(j + 1); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - att_space[i] = H5Screate_simple(1, att_size, NULL); - VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n"); - att_id[i] = - H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT); - VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n"); - err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att); - VRFY((err >= 0), "H5Awrite(1) failed.\n"); - for (j = 0; j < LOCAL_DATA_SIZE; j++) { - att[j] /= 10.0; - } - } - - /* - * close attribute IDs and spaces - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Sclose(att_space[i]); - VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n"); - err = H5Aclose(att_id[i]); - VRFY((err >= 0), "H5Aclose(att_id[i]) failed.\n"); - } - - /* End of Step 2: write attributes to each dataset */ - - /* - * flush the metadata cache again - */ - - if (verbose) - fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((err >= 0), "H5Fflush(2) failed.\n"); - - /* Tell the reader to check the file up to steps. */ - steps++; - Reader_check(mrc, steps, steps_done); - VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - - /* - * Step 3: write large attributes to each dataset - */ - - if (verbose) - fprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name); - - lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE); - - for (j = 0; j < LARGE_ATTR_SIZE; j++) { - lg_att[j] = (double)(j + 1); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL); - VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n"); - lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT, - H5P_DEFAULT); - VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n"); - err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); - VRFY((err >= 0), "H5Awrite(2) failed.\n"); - for (j = 0; j < LARGE_ATTR_SIZE; j++) { - lg_att[j] /= 10.0; - } - } - - /* Step 3: write large attributes to each dataset */ - - /* - * flush the metadata cache yet again to clean the object headers. - * - * This is an attempt to create a situation where we have dirty - * object header continuation chunks, but clean object headers - * to verify a speculative bug fix -- it doesn't seem to work, - * but I will leave the code in anyway, as the object header - * code is going to change a lot in the near future. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((err >= 0), "H5Fflush(3) failed.\n"); - - /* Tell the reader to check the file up to steps. 
*/ - steps++; - Reader_check(mrc, steps, steps_done); - VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - - /* - * Step 4: write different large attributes to each dataset - */ - - if (verbose) - fprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name); - - for (j = 0; j < LARGE_ATTR_SIZE; j++) { - lg_att[j] = (double)(j + 2); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); - VRFY((err >= 0), "H5Awrite(2) failed.\n"); - for (j = 0; j < LARGE_ATTR_SIZE; j++) { - lg_att[j] /= 10.0; - } - } - - /* End of Step 4: write different large attributes to each dataset */ - - /* - * flush the metadata cache again - */ - if (verbose) - fprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); - VRFY((err >= 0), "H5Fflush(3) failed.\n"); - - /* Tell the reader to check the file up to steps. */ - steps++; - Reader_check(mrc, steps, steps_done); - VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - - /* Step 5: Close all objects and the file */ - - /* - * close large attribute IDs and spaces - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - - err = H5Sclose(lg_att_space[i]); - VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n"); - err = H5Aclose(lg_att_id[i]); - VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n"); - } - - /* - * close the data sets - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Dclose(dataset[i]); - VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); - } - - /* - * close the data transfer property list. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); - - err = H5Pclose(dxpl_id); - VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); - - /* - * Close file. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name); - - err = H5Fclose(file_id); - VRFY((err >= 0), "H5Fclose(1) failed"); - - /* End of Step 5: Close all objects and the file */ - /* Tell the reader to check the file up to steps. */ - steps++; - Reader_check(mrc, steps, steps_done); - VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - - /* All done. Inform reader to end. 
*/ - steps = 0; - Reader_check(mrc, steps, steps_done); - VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - - if (verbose) - fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); - - return; - -} /* rr_obj_hdr_flush_confusion_writer() */ - -void -rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) -{ - int i; - int j; - hid_t file_id = -1; - hid_t fapl_id = -1; - hid_t dxpl_id = -1; - hid_t lg_att_id[NUM_DATA_SETS]; - hid_t lg_att_type[NUM_DATA_SETS]; - hid_t disk_space[NUM_DATA_SETS]; - hid_t mem_space[NUM_DATA_SETS]; - hid_t dataset[NUM_DATA_SETS]; - hsize_t disk_count[1]; - hsize_t disk_start[1]; - hsize_t mem_count[1]; - hsize_t mem_size[1]; - hsize_t mem_start[1]; - herr_t err; - htri_t tri_err; - double data[LOCAL_DATA_SIZE]; - double data_read[LOCAL_DATA_SIZE]; - double att[LOCAL_DATA_SIZE]; - double att_read[LOCAL_DATA_SIZE]; - double lg_att[LARGE_ATTR_SIZE]; - double lg_att_read[LARGE_ATTR_SIZE]; - - /* MPI variables */ - /* world communication size and rank */ - int mpi_world_size; - int mpi_world_rank; - /* private communicator size and rank */ - int mpi_size; - int mpi_rank; - int mrc; /* mpi error code */ - int steps = -1; /* How far (steps) to verify the file */ - int steps_done = -1; /* How far (steps) have been verified */ - - /* test bed related variables */ - const char *fcn_name = "rr_obj_hdr_flush_confusion_reader"; - const bool verbose = false; -#if 0 - const H5Ptest_param_t *pt; -#endif - char *filename; - - /* - * setup test bed related variables: - */ - -#if 0 - pt = (const H5Ptest_param_t *)GetTestParameters(); -#endif - /* filename = pt->name; */ filename = PARATESTFILE; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size); - MPI_Comm_rank(comm, &mpi_rank); - MPI_Comm_size(comm, &mpi_size); - - /* Repeatedly re-open the file and verify its contents until it is */ - /* told to end (when steps=0). */ - while (steps_done != 0) { - Reader_wait(mrc, steps); - VRFY((mrc >= 0), "Reader_wait failed"); - steps_done = 0; - - if (steps > 0) { - /* - * Set up file access property list with parallel I/O access - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); - - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); - err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); - VRFY((err >= 0), "H5Pset_fapl_mpio() failed"); - - /* - * Create a new file collectively and release property list identifier. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename); - - file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); - VRFY((file_id >= 0), "H5Fopen() failed"); - err = H5Pclose(fapl_id); - VRFY((err >= 0), "H5Pclose(fapl_id) failed"); - -#if 1 - if (steps >= 1) { - /*=====================================================* - * Step 1: open the data sets and read data. 
- *=====================================================*/ - - if (verbose) - fprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - dataset[i] = -1; - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT); - VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n"); - disk_space[i] = H5Dget_space(dataset[i]); - VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n"); - } - - /* - * setup data transfer property list - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); - err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); - VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); - - /* - * read data from the data sets - */ - - if (verbose) - fprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name); - - disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); - - mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - - mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - mem_start[0] = (hsize_t)(0); - - /* set up expected data for verification */ - for (j = 0; j < LOCAL_DATA_SIZE; j++) { - data[j] = (double)(mpi_rank + 1); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, - NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); - mem_space[i] = H5Screate_simple(1, mem_size, NULL); - VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); - err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); - err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, - data_read); - VRFY((err >= 0), "H5Dread(1) failed.\n"); - - /* compare read data with expected data */ - for (j = 0; j < LOCAL_DATA_SIZE; j++) - if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) { - fprintf(stdout, - "%0d:%s: Reading datasets value failed in " - "Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, data[j], data_read[j]); - nerrors++; - } - for (j = 0; j < LOCAL_DATA_SIZE; j++) - data[j] *= 10.0; - } - - /* - * close the data spaces - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - err = H5Sclose(disk_space[i]); - VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); - err = H5Sclose(mem_space[i]); - VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); - } - steps_done++; - } - /* End of Step 1: open the data sets and read data. 
*/ -#endif - -#if 1 - /*=====================================================* - * Step 2: reading attributes from each dataset - *=====================================================*/ - - if (steps >= 2) { - if (verbose) - fprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name); - - for (j = 0; j < LOCAL_DATA_SIZE; j++) { - att[j] = (double)(j + 1); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - hid_t att_id, att_type; - - att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT); - VRFY((att_id >= 0), "H5Aopen failed.\n"); - att_type = H5Aget_type(att_id); - VRFY((att_type >= 0), "H5Aget_type failed.\n"); - tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE); - VRFY((tri_err >= 0), "H5Tequal failed.\n"); - if (tri_err == 0) { - fprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank, - fcn_name, i); - nerrors++; - } - else { - /* should verify attribute size before H5Aread */ - err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read); - VRFY((err >= 0), "H5Aread failed.\n"); - /* compare read attribute data with expected data */ - for (j = 0; j < LOCAL_DATA_SIZE; j++) - if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) { - fprintf(stdout, - "%0d:%s: Mismatched attribute data read in Dataset %d, at position " - "%d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, att[j], att_read[j]); - nerrors++; - } - for (j = 0; j < LOCAL_DATA_SIZE; j++) { - att[j] /= 10.0; - } - } - err = H5Aclose(att_id); - VRFY((err >= 0), "H5Aclose failed.\n"); - } - steps_done++; - } - /* End of Step 2: reading attributes from each dataset */ -#endif - -#if 1 - /*=====================================================* - * Step 3 or 4: read large attributes from each dataset. - * Step 4 has different attribute value from step 3. - *=====================================================*/ - - if (steps >= 3) { - if (verbose) - fprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name); - - for (j = 0; j < LARGE_ATTR_SIZE; j++) { - lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2); - } - - for (i = 0; i < NUM_DATA_SETS; i++) { - lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT); - VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n"); - lg_att_type[i] = H5Aget_type(lg_att_id[i]); - VRFY((err >= 0), "H5Aget_type failed.\n"); - tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE); - VRFY((tri_err >= 0), "H5Tequal failed.\n"); - if (tri_err == 0) { - fprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", mpi_rank, - fcn_name, i); - nerrors++; - } - else { - /* should verify large attribute size before H5Aread */ - err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read); - VRFY((err >= 0), "H5Aread failed.\n"); - /* compare read attribute data with expected data */ - for (j = 0; j < LARGE_ATTR_SIZE; j++) - if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) { - fprintf(stdout, - "%0d:%s: Mismatched large attribute data read in Dataset %d, at " - "position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]); - nerrors++; - } - for (j = 0; j < LARGE_ATTR_SIZE; j++) { - - lg_att[j] /= 10.0; - } - } - err = H5Tclose(lg_att_type[i]); - VRFY((err >= 0), "H5Tclose failed.\n"); - err = H5Aclose(lg_att_id[i]); - VRFY((err >= 0), "H5Aclose failed.\n"); - } - /* Both step 3 and 4 use this same read checking code. */ - steps_done = (steps == 3) ? 
3 : 4; - } - - /* End of Step 3 or 4: read large attributes from each dataset */ -#endif - - /*=====================================================* - * Step 5: read all objects from the file - *=====================================================*/ - if (steps >= 5) { - /* nothing extra to verify. The file is closed normally. */ - /* Just increment steps_done */ - steps_done++; - } - - /* - * Close the data sets - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name); - - for (i = 0; i < NUM_DATA_SETS; i++) { - if (dataset[i] >= 0) { - err = H5Dclose(dataset[i]); - VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); - } - } - - /* - * close the data transfer property list. - */ - - if (verbose) - fprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); - - err = H5Pclose(dxpl_id); - VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); - - /* - * Close the file - */ - if (verbose) - fprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name); - err = H5Fclose(file_id); - VRFY((err >= 0), "H5Fclose(1) failed"); - - } /* else if (steps_done==0) */ - Reader_result(mrc, steps_done); - } /* end while(1) */ - - if (verbose) - fprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); - - return; -} /* rr_obj_hdr_flush_confusion_reader() */ - -#undef NUM_DATA_SETS -#undef LOCAL_DATA_SIZE -#undef LARGE_ATTR_SIZE -#undef Reader_check -#undef Reader_wait -#undef Reader_result -#undef Writer_Root -#undef Reader_Root - -/* - * Test creating a chunked dataset in parallel in a file with an alignment set - * and an alignment threshold large enough to avoid aligning the chunks but - * small enough that the raw data aggregator will be aligned if it is treated as - * an object that must be aligned by the library - */ -#define CHUNK_SIZE 72 -#define NCHUNKS 32 -#define AGGR_SIZE 2048 -#define EXTRA_ALIGN 100 - -void -chunk_align_bug_1(void) -{ - int mpi_rank; - hid_t file_id, dset_id, fapl_id, dcpl_id, space_id; - hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE; -#if 0 - h5_stat_size_t file_size; - hsize_t align; -#endif - herr_t ret; - const char *filename; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - - /* Create file without alignment */ - fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - /* Close file */ - ret = H5Fclose(file_id); - VRFY((ret >= 0), "H5Fclose succeeded"); -#if 0 - /* Get file size */ - file_size = h5_get_file_size(filename, fapl_id); - VRFY((file_size >= 0), "h5_get_file_size succeeded"); - - /* Calculate alignment value, set to allow a chunk to squeak in between the - * original EOF and the aligned location of the aggregator. 
Add some space - * for the dataset metadata */ - align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN; -#endif - - /* Set aggregator size and alignment, disable metadata aggregator */ - assert(AGGR_SIZE > CHUNK_SIZE); - ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE); - VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); - ret = H5Pset_meta_block_size(fapl_id, 0); - VRFY((ret >= 0), "H5Pset_meta_block_size succeeded"); -#if 0 - ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align); - VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); -#endif - - /* Reopen file with new settings */ - file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); - VRFY((file_id >= 0), "H5Fopen succeeded"); - - /* Create dataset */ - space_id = H5Screate_simple(1, &dims, NULL); - VRFY((space_id >= 0), "H5Screate_simple succeeded"); - dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dcpl_id, 1, &cdims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); - - /* Close ids */ - ret = H5Dclose(dset_id); - VRFY((dset_id >= 0), "H5Dclose succeeded"); - ret = H5Sclose(space_id); - VRFY((space_id >= 0), "H5Sclose succeeded"); - ret = H5Pclose(dcpl_id); - VRFY((dcpl_id >= 0), "H5Pclose succeeded"); - ret = H5Pclose(fapl_id); - VRFY((fapl_id >= 0), "H5Pclose succeeded"); - - /* Close file */ - ret = H5Fclose(file_id); - VRFY((ret >= 0), "H5Fclose succeeded"); - - return; -} /* end chunk_align_bug_1() */ - -/*============================================================================= - * End of t_mdset.c - *===========================================================================*/ diff --git a/testpar/API/t_ph5basic.c b/testpar/API/t_ph5basic.c deleted file mode 100644 index 9c980bf6330..00000000000 --- a/testpar/API/t_ph5basic.c +++ /dev/null @@ -1,188 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Test parallel HDF5 basic components - */ - -#include "hdf5.h" -#include "testphdf5.h" - -/*------------------------------------------------------------------------- - * Function: test_fapl_mpio_dup - * - * Purpose: Test if fapl_mpio property list keeps a duplicate of the - * communicator and INFO objects given when set; and returns - * duplicates of its components when H5Pget_fapl_mpio is called. 
- * - * Return: Success: None - * Failure: Abort - *------------------------------------------------------------------------- - */ -void -test_fapl_mpio_dup(void) -{ - int mpi_size, mpi_rank; - MPI_Comm comm, comm_tmp; - int mpi_size_old, mpi_rank_old; - int mpi_size_tmp, mpi_rank_tmp; - MPI_Info info = MPI_INFO_NULL; - MPI_Info info_tmp = MPI_INFO_NULL; - int mrc; /* MPI return value */ - hid_t acc_pl; /* File access properties */ - herr_t ret; /* HDF5 return value */ - int nkeys, nkeys_tmp; - - if (VERBOSE_MED) - printf("Verify fapl_mpio duplicates communicator and INFO objects\n"); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if (VERBOSE_MED) - printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size); - - /* Create a new communicator that has the same processes as MPI_COMM_WORLD. - * Use MPI_Comm_split because it is simpler than MPI_Comm_create - */ - mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split"); - MPI_Comm_size(comm, &mpi_size_old); - MPI_Comm_rank(comm, &mpi_rank_old); - if (VERBOSE_MED) - printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old); - - /* create a new INFO object with some trivial information. */ - mrc = MPI_Info_create(&info); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_create"); - mrc = MPI_Info_set(info, "hdf_info_name", "XYZ"); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_set"); - if (MPI_INFO_NULL != info) { - mrc = MPI_Info_get_nkeys(info, &nkeys); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); - } -#if 0 - if (VERBOSE_MED) - h5_dump_info_object(info); -#endif - - acc_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((acc_pl >= 0), "H5P_FILE_ACCESS"); - - ret = H5Pset_fapl_mpio(acc_pl, comm, info); - VRFY((ret >= 0), ""); - - /* Case 1: - * Free the created communicator and INFO object. - * Check if the access property list is still valid and can return - * valid communicator and INFO object. - */ - mrc = MPI_Comm_free(&comm); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); - if (MPI_INFO_NULL != info) { - mrc = MPI_Info_free(&info); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); - } - - ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp); - VRFY((ret >= 0), "H5Pget_fapl_mpio"); - MPI_Comm_size(comm_tmp, &mpi_size_tmp); - MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); - if (VERBOSE_MED) - printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); - VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size"); - VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank"); - if (MPI_INFO_NULL != info_tmp) { - mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); - VRFY((nkeys_tmp == nkeys), "new and old nkeys equal"); - } -#if 0 - if (VERBOSE_MED) - h5_dump_info_object(info_tmp); -#endif - - /* Case 2: - * Free the retrieved communicator and INFO object. - * Check if the access property list is still valid and can return - * valid communicator and INFO object. - * Also verify the NULL argument option. - */ - mrc = MPI_Comm_free(&comm_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); - if (MPI_INFO_NULL != info_tmp) { - mrc = MPI_Info_free(&info_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); - } - - /* check NULL argument options. 
*/ - ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL); - VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only"); - mrc = MPI_Comm_free(&comm_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); - - ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp); - VRFY((ret >= 0), "H5Pget_fapl_mpio Info only"); - if (MPI_INFO_NULL != info_tmp) { - mrc = MPI_Info_free(&info_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); - } - - ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL); - VRFY((ret >= 0), "H5Pget_fapl_mpio neither"); - - /* now get both and check validity too. */ - /* Do not free the returned objects which are used in the next case. */ - ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp); - VRFY((ret >= 0), "H5Pget_fapl_mpio"); - MPI_Comm_size(comm_tmp, &mpi_size_tmp); - MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); - if (VERBOSE_MED) - printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); - VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size"); - VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank"); - if (MPI_INFO_NULL != info_tmp) { - mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); - VRFY((nkeys_tmp == nkeys), "new and old nkeys equal"); - } -#if 0 - if (VERBOSE_MED) - h5_dump_info_object(info_tmp); -#endif - - /* Case 3: - * Close the property list and verify the retrieved communicator and INFO - * object are still valid. - */ - H5Pclose(acc_pl); - MPI_Comm_size(comm_tmp, &mpi_size_tmp); - MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); - if (VERBOSE_MED) - printf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); - if (MPI_INFO_NULL != info_tmp) { - mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); - } -#if 0 - if (VERBOSE_MED) - h5_dump_info_object(info_tmp); -#endif - - /* clean up */ - mrc = MPI_Comm_free(&comm_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); - if (MPI_INFO_NULL != info_tmp) { - mrc = MPI_Info_free(&info_tmp); - VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); - } -} /* end test_fapl_mpio_dup() */ diff --git a/testpar/API/t_prop.c b/testpar/API/t_prop.c deleted file mode 100644 index a4d90c4d70b..00000000000 --- a/testpar/API/t_prop.c +++ /dev/null @@ -1,646 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Parallel tests for encoding/decoding plists sent between processes - */ - -#include "hdf5.h" -#include "testphdf5.h" - -#if 0 -#include "H5ACprivate.h" -#include "H5Pprivate.h" -#endif - -static int -test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) -{ - MPI_Request req[2]; - MPI_Status status; - hid_t pl; /* Decoded property list */ - size_t buf_size = 0; - void *sbuf = NULL; - herr_t ret; /* Generic return value */ - - if (mpi_rank == 0) { - int send_size = 0; - - /* first call to encode returns only the size of the buffer needed */ - ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT); - VRFY((ret >= 0), "H5Pencode succeeded"); - - sbuf = (uint8_t *)malloc(buf_size); - - ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT); - VRFY((ret >= 0), "H5Pencode succeeded"); - - /* this is a temp fix to send this size_t */ - send_size = (int)buf_size; - - MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]); - MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]); - } /* end if */ - - if (mpi_rank == recv_proc) { - int recv_size; - void *rbuf; - - MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status); - VRFY((recv_size >= 0), "MPI_Recv succeeded"); - buf_size = (size_t)recv_size; - rbuf = (uint8_t *)malloc(buf_size); - MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status); - - pl = H5Pdecode(rbuf); - VRFY((pl >= 0), "H5Pdecode succeeded"); - - VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded"); - - ret = H5Pclose(pl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - if (NULL != rbuf) - free(rbuf); - } /* end if */ - - if (0 == mpi_rank) { - /* gcc 11 complains about passing MPI_STATUSES_IGNORE as an MPI_Status - * array. See the discussion here: - * - * https://github.com/pmodels/mpich/issues/5687 - */ - /* H5_GCC_DIAG_OFF("stringop-overflow") */ - MPI_Waitall(2, req, MPI_STATUSES_IGNORE); - /* H5_GCC_DIAG_ON("stringop-overflow") */ - } - - if (NULL != sbuf) - free(sbuf); - - MPI_Barrier(MPI_COMM_WORLD); - return 0; -} - -void -test_plist_ed(void) -{ - hid_t dcpl; /* dataset create prop. list */ - hid_t dapl; /* dataset access prop. list */ - hid_t dxpl; /* dataset transfer prop. list */ - hid_t gcpl; /* group create prop. list */ - hid_t lcpl; /* link create prop. list */ - hid_t lapl; /* link access prop. list */ - hid_t ocpypl; /* object copy prop. list */ - hid_t ocpl; /* object create prop. list */ - hid_t fapl; /* file access prop. list */ - hid_t fcpl; /* file create prop. list */ - hid_t strcpl; /* string create prop. list */ - hid_t acpl; /* attribute create prop. 
list */ - - int mpi_size, mpi_rank, recv_proc; - - hsize_t chunk_size = 16384; /* chunk size */ - double fill = 2.7; /* Fill value */ - size_t nslots = 521 * 2; - size_t nbytes = 1048576 * 10; - double w0 = 0.5; - unsigned max_compact; - unsigned min_dense; - hsize_t max_size[1]; /*data space maximum size */ - const char *c_to_f = "x+32"; - H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION, - true, - false, - false, - "temp", - true, - false, - (2 * 2048 * 1024), - 0.3, - (64 * 1024 * 1024), - (4 * 1024 * 1024), - 60000, - H5C_incr__threshold, - 0.8, - 3.0, - true, - (8 * 1024 * 1024), - H5C_flash_incr__add_space, - 2.0, - 0.25, - H5C_decr__age_out_with_threshold, - 0.997, - 0.8, - true, - (3 * 1024 * 1024), - 3, - false, - 0.2, - (256 * 2048), - 1 /* H5AC__DEFAULT_METADATA_WRITE_STRATEGY */}; - - herr_t ret; /* Generic return value */ - - if (VERBOSE_MED) - printf("Encode/Decode DCPLs\n"); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - if (mpi_size == 1) - recv_proc = 0; - else - recv_proc = 1; - - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_chunk(dcpl, 1, &chunk_size); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE); - VRFY((ret >= 0), "H5Pset_alloc_time succeeded"); - - ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); - VRFY((ret >= 0), "set fill-value succeeded"); - - max_size[0] = 100; - ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); - VRFY((ret >= 0), "set external succeeded"); - ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); - VRFY((ret >= 0), "set external succeeded"); - ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); - VRFY((ret >= 0), "set external succeeded"); - ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); - VRFY((ret >= 0), "set external succeeded"); - - ret = test_encode_decode(dcpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE DAPLS *****/ - dapl = H5Pcreate(H5P_DATASET_ACCESS); - VRFY((dapl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0); - VRFY((ret >= 0), "H5Pset_chunk_cache succeeded"); - - ret = test_encode_decode(dapl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(dapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE OCPLS *****/ - ocpl = H5Pcreate(H5P_OBJECT_CREATE); - VRFY((ocpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded"); - - ret = H5Pset_attr_phase_change(ocpl, 110, 105); - VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded"); - - ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL); - VRFY((ret >= 0), "H5Pset_filter succeeded"); - - ret = test_encode_decode(ocpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(ocpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE DXPLS *****/ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2); - VRFY((ret >= 
0), "H5Pset_btree_ratios succeeded"); - - ret = H5Pset_hyper_vector_size(dxpl, 5); - VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded"); - - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded"); - - ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded"); - - ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); - - ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); - - ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC); - VRFY((ret >= 0), "H5Pset_edc_check succeeded"); - - ret = H5Pset_data_transform(dxpl, c_to_f); - VRFY((ret >= 0), "H5Pset_data_transform succeeded"); - - ret = test_encode_decode(dxpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(dxpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE GCPLS *****/ - gcpl = H5Pcreate(H5P_GROUP_CREATE); - VRFY((gcpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_local_heap_size_hint(gcpl, 256); - VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded"); - - ret = H5Pset_link_phase_change(gcpl, 2, 2); - VRFY((ret >= 0), "H5Pset_link_phase_change succeeded"); - - /* Query the group creation properties */ - ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense); - VRFY((ret >= 0), "H5Pget_est_link_info succeeded"); - - ret = H5Pset_est_link_info(gcpl, 3, 9); - VRFY((ret >= 0), "H5Pset_est_link_info succeeded"); - - ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED)); - VRFY((ret >= 0), "H5Pset_link_creation_order succeeded"); - - ret = test_encode_decode(gcpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(gcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE LCPLS *****/ - lcpl = H5Pcreate(H5P_LINK_CREATE); - VRFY((lcpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_create_intermediate_group(lcpl, true); - VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded"); - - ret = test_encode_decode(lcpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(lcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE LAPLS *****/ - lapl = H5Pcreate(H5P_LINK_ACCESS); - VRFY((lapl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_nlinks(lapl, (size_t)134); - VRFY((ret >= 0), "H5Pset_nlinks succeeded"); - - ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY); - VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded"); - - ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod"); - VRFY((ret >= 0), "H5Pset_nlinks succeeded"); - - /* Create FAPL for the elink FAPL */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_alignment(fapl, 2, 1024); - VRFY((ret >= 0), "H5Pset_alignment succeeded"); - - ret = H5Pset_elink_fapl(lapl, fapl); - VRFY((ret >= 0), "H5Pset_elink_fapl succeeded"); - - /* Close the elink's FAPL */ - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - ret = test_encode_decode(lapl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(lapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE OCPYPLS *****/ - ocpypl = 
H5Pcreate(H5P_OBJECT_COPY); - VRFY((ocpypl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG); - VRFY((ret >= 0), "H5Pset_copy_object succeeded"); - - ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo"); - VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded"); - - ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar"); - VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded"); - - ret = test_encode_decode(ocpypl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(ocpypl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE FAPLS *****/ - fapl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_family_offset(fapl, 1024); - VRFY((ret >= 0), "H5Pset_family_offset succeeded"); - - ret = H5Pset_meta_block_size(fapl, 2098452); - VRFY((ret >= 0), "H5Pset_meta_block_size succeeded"); - - ret = H5Pset_sieve_buf_size(fapl, 1048576); - VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded"); - - ret = H5Pset_alignment(fapl, 2, 1024); - VRFY((ret >= 0), "H5Pset_alignment succeeded"); - - ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3); - VRFY((ret >= 0), "H5Pset_cache succeeded"); - - ret = H5Pset_elink_file_cache_size(fapl, 10485760); - VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded"); - - ret = H5Pset_gc_references(fapl, 1); - VRFY((ret >= 0), "H5Pset_gc_references succeeded"); - - ret = H5Pset_small_data_block_size(fapl, 2048); - VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); - - ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - VRFY((ret >= 0), "H5Pset_libver_bounds succeeded"); - - ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK); - VRFY((ret >= 0), "H5Pset_fclose_degree succeeded"); - - ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP); - VRFY((ret >= 0), "H5Pset_multi_type succeeded"); - - ret = H5Pset_mdc_config(fapl, &my_cache_config); - VRFY((ret >= 0), "H5Pset_mdc_config succeeded"); - - ret = test_encode_decode(fapl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE FCPLS *****/ - fcpl = H5Pcreate(H5P_FILE_CREATE); - VRFY((fcpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_userblock(fcpl, 1024); - VRFY((ret >= 0), "H5Pset_userblock succeeded"); - - ret = H5Pset_istore_k(fcpl, 3); - VRFY((ret >= 0), "H5Pset_istore_k succeeded"); - - ret = H5Pset_sym_k(fcpl, 4, 5); - VRFY((ret >= 0), "H5Pset_sym_k succeeded"); - - ret = H5Pset_shared_mesg_nindexes(fcpl, 8); - VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded"); - - ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32); - VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded"); - - ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20); - VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded"); - - ret = H5Pset_sizes(fcpl, 8, 4); - VRFY((ret >= 0), "H5Pset_sizes succeeded"); - - ret = test_encode_decode(fcpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(fcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE STRCPLS *****/ - strcpl = H5Pcreate(H5P_STRING_CREATE); - VRFY((strcpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8); - VRFY((ret >= 0), "H5Pset_char_encoding succeeded"); - - ret = test_encode_decode(strcpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - 
ret = H5Pclose(strcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /******* ENCODE/DECODE ACPLS *****/ - acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE); - VRFY((acpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8); - VRFY((ret >= 0), "H5Pset_char_encoding succeeded"); - - ret = test_encode_decode(acpl, mpi_rank, recv_proc); - VRFY((ret >= 0), "test_encode_decode succeeded"); - - ret = H5Pclose(acpl); - VRFY((ret >= 0), "H5Pclose succeeded"); -} - -#if 0 -void -external_links(void) -{ - hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */ - hid_t lapl = H5I_INVALID_HID; /* link access prop. list */ - hid_t fapl = H5I_INVALID_HID; /* file access prop. list */ - hid_t gapl = H5I_INVALID_HID; /* group access prop. list */ - hid_t fid = H5I_INVALID_HID; /* file id */ - hid_t group = H5I_INVALID_HID; /* group id */ - int mpi_size, mpi_rank; - - MPI_Comm comm; - int doIO; - int i, mrc; - - herr_t ret; /* Generic return value */ - htri_t tri_status; /* tri return value */ - - const char *filename = "HDF5test.h5"; - const char *filename_ext = "HDF5test_ext.h5"; - const char *group_path = "/Base/Block/Step"; - const char *link_name = "link"; /* external link */ - char link_path[50]; - - if (VERBOSE_MED) - printf("Check external links\n"); - - /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Check MPI communicator access properties are passed to - linked external files */ - - if (mpi_rank == 0) { - - lcpl = H5Pcreate(H5P_LINK_CREATE); - VRFY((lcpl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_create_intermediate_group(lcpl, 1); - VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded"); - - /* Create file to serve as target for external link.*/ - fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT); - VRFY((group >= 0), "H5Gcreate succeeded"); - - ret = H5Gclose(group); - VRFY((ret >= 0), "H5Gclose succeeded"); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - - fapl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl >= 0), "H5Pcreate succeeded"); - - /* Create a new file using the file access property list. */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT); - VRFY((group >= 0), "H5Gcreate succeeded"); - - /* Create external links to the target files. */ - ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT); - VRFY((ret >= 0), "H5Lcreate_external succeeded"); - - /* Close and release resources. */ - ret = H5Pclose(lcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Gclose(group); - VRFY((ret >= 0), "H5Gclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - MPI_Barrier(MPI_COMM_WORLD); - - /* - * For the first case, use all the processes. For the second case - * use a sub-communicator to verify the correct communicator is - * being used for the externally linked files. - * There is no way to determine if MPI info is being used for the - * externally linked files. 
- */ - - for (i = 0; i < 2; i++) { - - comm = MPI_COMM_WORLD; - - if (i == 0) - doIO = 1; - else { - doIO = mpi_rank % 2; - mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm); - VRFY((mrc == MPI_SUCCESS), ""); - } - - if (doIO) { - fapl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL); - VRFY((fapl >= 0), "H5Pset_fapl_mpio succeeded"); - - fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); - VRFY((fid >= 0), "H5Fopen succeeded"); - - /* test opening a group that is to an external link, the external linked - file should inherit the source file's access properties */ - snprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name); - group = H5Gopen2(fid, link_path, H5P_DEFAULT); - VRFY((group >= 0), "H5Gopen succeeded"); - ret = H5Gclose(group); - VRFY((ret >= 0), "H5Gclose succeeded"); - - /* test opening a group that is external link by setting group - creation property */ - gapl = H5Pcreate(H5P_GROUP_ACCESS); - VRFY((gapl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_elink_fapl(gapl, fapl); - VRFY((ret >= 0), "H5Pset_elink_fapl succeeded"); - - group = H5Gopen2(fid, link_path, gapl); - VRFY((group >= 0), "H5Gopen succeeded"); - - ret = H5Gclose(group); - VRFY((ret >= 0), "H5Gclose succeeded"); - - ret = H5Pclose(gapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* test link APIs */ - lapl = H5Pcreate(H5P_LINK_ACCESS); - VRFY((lapl >= 0), "H5Pcreate succeeded"); - - ret = H5Pset_elink_fapl(lapl, fapl); - VRFY((ret >= 0), "H5Pset_elink_fapl succeeded"); - - tri_status = H5Lexists(fid, link_path, H5P_DEFAULT); - VRFY((tri_status == true), "H5Lexists succeeded"); - - tri_status = H5Lexists(fid, link_path, lapl); - VRFY((tri_status == true), "H5Lexists succeeded"); - - group = H5Oopen(fid, link_path, H5P_DEFAULT); - VRFY((group >= 0), "H5Oopen succeeded"); - - ret = H5Oclose(group); - VRFY((ret >= 0), "H5Oclose succeeded"); - - group = H5Oopen(fid, link_path, lapl); - VRFY((group >= 0), "H5Oopen succeeded"); - - ret = H5Oclose(group); - VRFY((ret >= 0), "H5Oclose succeeded"); - - ret = H5Pclose(lapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* close the remaining resources */ - - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - if (comm != MPI_COMM_WORLD) { - mrc = MPI_Comm_free(&comm); - VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded"); - } - } - - MPI_Barrier(MPI_COMM_WORLD); - - /* delete the test files */ - if (mpi_rank == 0) { - MPI_File_delete(filename, MPI_INFO_NULL); - MPI_File_delete(filename_ext, MPI_INFO_NULL); - } -} -#endif diff --git a/testpar/API/t_pshutdown.c b/testpar/API/t_pshutdown.c deleted file mode 100644 index fad9ea33b20..00000000000 --- a/testpar/API/t_pshutdown.c +++ /dev/null @@ -1,147 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Purpose: This test creates a file and a bunch of objects in the - * file and then calls MPI_Finalize without closing anything. The - * library should exercise the attribute callback destroy attached to - * MPI_COMM_SELF and terminate the HDF5 library closing all open - * objects. The t_prestart test will read back the file and make sure - * all created objects are there. - */ - -#include "hdf5.h" -#include "testphdf5.h" - -int nerrors = 0; /* errors count */ - -const char *FILENAME[] = {"shutdown.h5", NULL}; - -int -main(int argc, char **argv) -{ - hid_t file_id, dset_id, grp_id; - hid_t fapl, sid, mem_dataspace; - hsize_t dims[RANK], i; - herr_t ret; -#if 0 - char filename[1024]; -#endif - int mpi_size, mpi_rank; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - hsize_t start[RANK]; - hsize_t count[RANK]; - hsize_t stride[RANK]; - hsize_t block[RANK]; - DATATYPE *data_array = NULL; /* data buffer */ - - MPI_Init(&argc, &argv); - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); - - if (MAINPROCESS) { - printf("Testing %-62s", "proper shutdown of HDF5 library"); - fflush(stdout); - } - - /* Set up file access property list with parallel I/O access */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl >= 0), "H5Pcreate succeeded"); - - /* Get the capability flag of the VOL connector being used */ - ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g); - VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded"); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf( - " API functions for basic file, group, or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - MPI_Finalize(); - return 0; - } - - ret = H5Pset_fapl_mpio(fapl, comm, info); - VRFY((ret >= 0), ""); - -#if 0 - h5_fixname(FILENAME[0], fapl, filename, sizeof filename); -#endif - file_id = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((grp_id >= 0), "H5Gcreate succeeded"); - - dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size; - dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size; - sid = H5Screate_simple(RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate succeeded"); - - /* allocate memory for data buffer */ - data_array = (DATATYPE *)malloc(dims[0] * dims[1] * sizeof(DATATYPE)); - VRFY((data_array != NULL), "data_array malloc succeeded"); - - /* Each process takes a slabs of rows. 
*/ - block[0] = dims[0] / (hsize_t)mpi_size; - block[1] = dims[1]; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (hsize_t)mpi_rank * block[0]; - start[1] = 0; - - /* put some trivial data in the data_array */ - for (i = 0; i < dims[0] * dims[1]; i++) - data_array[i] = mpi_rank + 1; - - ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple(RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* write data independently */ - ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release data buffers */ - if (data_array) - free(data_array); - - MPI_Finalize(); - - /* nerrors += GetTestNumErrs(); */ - - if (MAINPROCESS) { - if (0 == nerrors) { - puts(" PASSED"); - fflush(stdout); - } - else { - puts("*FAILED*"); - fflush(stdout); - } - } - - return (nerrors != 0); -} diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c deleted file mode 100644 index 004ce1e35b2..00000000000 --- a/testpar/API/t_shapesame.c +++ /dev/null @@ -1,4484 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - This program will test independent and collective reads and writes between - selections of different rank that non-the-less are deemed as having the - same shape by H5Sselect_shape_same(). - */ - -#define H5S_FRIEND /*suppress error about including H5Spkg */ - -/* Define this macro to indicate that the testing APIs should be available */ -#define H5S_TESTING - -#if 0 -#include "H5Spkg.h" /* Dataspaces */ -#endif - -#include "hdf5.h" -#include "testphdf5.h" - -#ifndef PATH_MAX -#define PATH_MAX 512 -#endif - -/* FILENAME and filenames must have the same number of names. - * Use PARATESTFILE in general and use a separated filename only if the file - * created in one test is accessed by a different test. - * filenames[0] is reserved as the file name for PARATESTFILE. - */ -#define NFILENAME 2 -const char *FILENAME[NFILENAME] = {"ShapeSameTest.h5", NULL}; -char filenames[NFILENAME][PATH_MAX]; -hid_t fapl; /* file access property list */ - -/* On Lustre (and perhaps other parallel file systems?), we have severe - * slow downs if two or more processes attempt to access the same file system - * block. To minimize this problem, we set alignment in the shape same tests - * to the default Lustre block size -- which greatly reduces contention in - * the chunked dataset case. 
- */ - -#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024)) - -#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */ - -struct hs_dr_pio_test_vars_t { - int mpi_size; - int mpi_rank; - MPI_Comm mpi_comm; - MPI_Info mpi_info; - int test_num; - int edge_size; - int checker_edge_size; - int chunk_edge_size; - int small_rank; - int large_rank; - hid_t dset_type; - uint32_t *small_ds_buf_0; - uint32_t *small_ds_buf_1; - uint32_t *small_ds_buf_2; - uint32_t *small_ds_slice_buf; - uint32_t *large_ds_buf_0; - uint32_t *large_ds_buf_1; - uint32_t *large_ds_buf_2; - uint32_t *large_ds_slice_buf; - int small_ds_offset; - int large_ds_offset; - hid_t fid; /* HDF5 file ID */ - hid_t xfer_plist; - hid_t full_mem_small_ds_sid; - hid_t full_file_small_ds_sid; - hid_t mem_small_ds_sid; - hid_t file_small_ds_sid_0; - hid_t file_small_ds_sid_1; - hid_t small_ds_slice_sid; - hid_t full_mem_large_ds_sid; - hid_t full_file_large_ds_sid; - hid_t mem_large_ds_sid; - hid_t file_large_ds_sid_0; - hid_t file_large_ds_sid_1; - hid_t file_large_ds_process_slice_sid; - hid_t mem_large_ds_process_slice_sid; - hid_t large_ds_slice_sid; - hid_t small_dataset; /* Dataset ID */ - hid_t large_dataset; /* Dataset ID */ - size_t small_ds_size; - size_t small_ds_slice_size; - size_t large_ds_size; - size_t large_ds_slice_size; - hsize_t dims[PAR_SS_DR_MAX_RANK]; - hsize_t chunk_dims[PAR_SS_DR_MAX_RANK]; - hsize_t start[PAR_SS_DR_MAX_RANK]; - hsize_t stride[PAR_SS_DR_MAX_RANK]; - hsize_t count[PAR_SS_DR_MAX_RANK]; - hsize_t block[PAR_SS_DR_MAX_RANK]; - hsize_t *start_ptr; - hsize_t *stride_ptr; - hsize_t *count_ptr; - hsize_t *block_ptr; - int skips; - int max_skips; - int64_t total_tests; - int64_t tests_run; - int64_t tests_skipped; -}; - -/*------------------------------------------------------------------------- - * Function: hs_dr_pio_test__setup() - * - * Purpose: Do setup for tests of I/O to/from hyperslab selections of - * different rank in the parallel case. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0 - -static void -hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size, - const int chunk_edge_size, const int small_rank, const int large_rank, - const bool use_collective_io, const hid_t dset_type, const int express_test, - struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG - const char *fcnName = "hs_dr_pio_test__setup()"; -#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */ - const char *filename; - bool mis_match = false; - int i; - int mrc; - int mpi_rank; /* needed by the VRFY macro */ - uint32_t expected_value; - uint32_t *ptr_0; - uint32_t *ptr_1; - hid_t acc_tpl; /* File access templates */ - hid_t small_ds_dcpl_id = H5P_DEFAULT; - hid_t large_ds_dcpl_id = H5P_DEFAULT; - herr_t ret; /* Generic return value */ - - assert(edge_size >= 6); - assert(edge_size >= chunk_edge_size); - assert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); - assert(1 < small_rank); - assert(small_rank < large_rank); - assert(large_rank <= PAR_SS_DR_MAX_RANK); - - tv_ptr->test_num = test_num; - tv_ptr->edge_size = edge_size; - tv_ptr->checker_edge_size = checker_edge_size; - tv_ptr->chunk_edge_size = chunk_edge_size; - tv_ptr->small_rank = small_rank; - tv_ptr->large_rank = large_rank; - tv_ptr->dset_type = dset_type; - - MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size)); - MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank)); - /* the VRFY() macro needs the local variable mpi_rank -- set it up now */ - mpi_rank = tv_ptr->mpi_rank; - - assert(tv_ptr->mpi_size >= 1); - - tv_ptr->mpi_comm = MPI_COMM_WORLD; - tv_ptr->mpi_info = MPI_INFO_NULL; - - for (i = 0; i < tv_ptr->small_rank - 1; i++) { - tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size); - tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size); - } - tv_ptr->small_ds_size *= (size_t)(tv_ptr->mpi_size + 1); - - /* used by checker board tests only */ - tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank; - - assert(0 < tv_ptr->small_ds_offset); - assert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK); - - for (i = 0; i < tv_ptr->large_rank - 1; i++) { - - tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size); - tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size); - } - tv_ptr->large_ds_size *= (size_t)(tv_ptr->mpi_size + 1); - - /* used by checker board tests only */ - tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank; - - assert(0 <= tv_ptr->large_ds_offset); - assert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK); - - /* set up the start, stride, count, and block pointers */ - /* used by contiguous tests only */ - tv_ptr->start_ptr = &(tv_ptr->start[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); - tv_ptr->stride_ptr = &(tv_ptr->stride[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); - tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); - tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); - - /* Allocate buffers */ - tv_ptr->small_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_size); - VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded"); - - tv_ptr->small_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_size); - VRFY((tv_ptr->small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded"); - - tv_ptr->small_ds_buf_2 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_size); - 
VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded"); - - tv_ptr->small_ds_slice_buf = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size); - VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded"); - - tv_ptr->large_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_size); - VRFY((tv_ptr->large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded"); - - tv_ptr->large_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_size); - VRFY((tv_ptr->large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded"); - - tv_ptr->large_ds_buf_2 = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_size); - VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded"); - - tv_ptr->large_ds_slice_buf = (uint32_t *)malloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size); - VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded"); - - /* initialize the buffers */ - - ptr_0 = tv_ptr->small_ds_buf_0; - for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) - *ptr_0++ = (uint32_t)i; - memset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); - memset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); - - memset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); - - ptr_0 = tv_ptr->large_ds_buf_0; - for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) - *ptr_0++ = (uint32_t)i; - memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); - memset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); - - memset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size); - - filename = filenames[0]; /* (const char *)GetTestParameters(); */ - assert(filename != NULL); -#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG - if (MAINPROCESS) { - - fprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num); - fprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size); - fprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank, - tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io); - fprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size, - tv_ptr->chunk_edge_size); - fprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size); - fprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank, - (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size)); - fprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename); - } -#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */ - /* ---------------------------------------- - * CREATE AN HDF5 FILE WITH PARALLEL ACCESS - * ---------------------------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(tv_ptr->mpi_comm, tv_ptr->mpi_info, facc_type); - VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded"); - - /* set the alignment -- need it large so that we aren't always hitting the - * the same file system block. Do this only if express_test is greater - * than zero. 
- */ - if (express_test > 0) { - - ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT); - VRFY((ret != FAIL), "H5Pset_alignment() succeeded"); - } - - /* create the file collectively */ - tv_ptr->fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((tv_ptr->fid >= 0), "H5Fcreate succeeded"); - - MESG("File opened."); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); - - /* setup dims: */ - tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1); - tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size); - - /* Create small ds dataspaces */ - tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded"); - - tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded"); - - tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded"); - - tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_small_ds_sid_0 != 0), "H5Screate_simple() file_small_ds_sid_0 succeeded"); - - /* used by checker board tests only */ - tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_small_ds_sid_1 != 0), "H5Screate_simple() file_small_ds_sid_1 succeeded"); - - tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL); - VRFY((tv_ptr->small_ds_slice_sid != 0), "H5Screate_simple() small_ds_slice_sid succeeded"); - - /* Create large ds dataspaces */ - tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded"); - - tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded"); - - tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded"); - - tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded"); - - /* used by checker board tests only */ - tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded"); - - tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL), - "H5Screate_simple() mem_large_ds_process_slice_sid succeeded"); - - tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL), - "H5Screate_simple() file_large_ds_process_slice_sid succeeded"); - - tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL); - VRFY((tv_ptr->large_ds_slice_sid != 0), "H5Screate_simple() large_ds_slice_sid succeeded"); - 
- /* if chunk edge size is greater than zero, set up the small and - * large data set creation property lists to specify chunked - * datasets. - */ - if (tv_ptr->chunk_edge_size > 0) { - - /* Under Lustre (and perhaps other parallel file systems?) we get - * locking delays when two or more processes attempt to access the - * same file system block. - * - * To minimize this problem, I have changed chunk_dims[0] - * from (mpi_size + 1) to just when any sort of express test is - * selected. Given the structure of the test, and assuming we - * set the alignment large enough, this avoids the contention - * issue by seeing to it that each chunk is only accessed by one - * process. - * - * One can argue as to whether this is a good thing to do in our - * tests, but for now it is necessary if we want the test to complete - * in a reasonable amount of time. - * - * JRM -- 9/16/10 - */ - - tv_ptr->chunk_dims[0] = 1; - - tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] = - (hsize_t)(tv_ptr->chunk_edge_size); - - small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded"); - - ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED); - VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded"); - - ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims); - VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); - - large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded"); - - ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED); - VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded"); - - ret = H5Pset_chunk(large_ds_dcpl_id, tv_ptr->large_rank, tv_ptr->chunk_dims); - VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded"); - } - - /* create the small dataset */ - tv_ptr->small_dataset = - H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT, - small_ds_dcpl_id, H5P_DEFAULT); - VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded"); - - /* create the large dataset */ - tv_ptr->large_dataset = - H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT, - large_ds_dcpl_id, H5P_DEFAULT); - VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded"); - - /* setup xfer property list */ - tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - if (use_collective_io) { - ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - - /* setup selection to write initial data to the small and large data sets */ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; - - for (i = 1; i < tv_ptr->large_rank; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - - /* setup selections for writing initial data to the small data set */ - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, 
tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); - - if (MAINPROCESS) { /* add an additional slice to the selections */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); - - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded"); - } - - /* write the initial value of the small data set to file */ - ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); - - VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); - - /* sync with the other processes before checking data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); - - /* read the small data set back to verify that it contains the - * expected data. Note that each process reads in the entire - * data set and verifies it. - */ - ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid, - tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); - VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded"); - - /* verify that the correct data was written to the small data set */ - expected_value = 0; - mis_match = false; - ptr_1 = tv_ptr->small_ds_buf_1; - - i = 0; - for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - ptr_1++; - expected_value++; - } - VRFY((mis_match == false), "small ds init data good."); - - /* setup selections for writing initial data to the large data set */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded"); - - /* In passing, setup the process slice dataspaces as well */ - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start, - tv_ptr->stride, tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start, - tv_ptr->stride, tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded"); - - if (MAINPROCESS) { /* add an additional slice to the selections */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded"); - } - - /* write 
the initial value of the large data set to file */ - ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid, - tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stderr); - VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); - - /* sync with the other processes before checking data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes"); - - /* read the large data set back to verify that it contains the - * expected data. Note that each process reads in the entire - * data set. - */ - ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid, - tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); - VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded"); - - /* verify that the correct data was written to the large data set */ - expected_value = 0; - mis_match = false; - ptr_1 = tv_ptr->large_ds_buf_1; - - i = 0; - for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - ptr_1++; - expected_value++; - } - VRFY((mis_match == false), "large ds init data good."); - - /* sync with the other processes before changing data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync initial values check"); - - return; - -} /* hs_dr_pio_test__setup() */ - -/*------------------------------------------------------------------------- - * Function: hs_dr_pio_test__takedown() - * - * Purpose: Do takedown after tests of I/O to/from hyperslab selections - * of different rank in the parallel case. - * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0 - -static void -hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG - const char *fcnName = "hs_dr_pio_test__takedown()"; -#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */ - int mpi_rank; /* needed by the VRFY macro */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* Close property lists */ - if (tv_ptr->xfer_plist != H5P_DEFAULT) { - ret = H5Pclose(tv_ptr->xfer_plist); - VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded"); - } - - /* Close dataspaces */ - ret = H5Sclose(tv_ptr->full_mem_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded"); - - ret = H5Sclose(tv_ptr->full_file_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded"); - - ret = H5Sclose(tv_ptr->mem_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded"); - - ret = H5Sclose(tv_ptr->file_small_ds_sid_0); - VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_0) succeeded"); - - ret = H5Sclose(tv_ptr->file_small_ds_sid_1); - VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_1) succeeded"); - - ret = H5Sclose(tv_ptr->small_ds_slice_sid); - VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded"); - - ret = H5Sclose(tv_ptr->full_mem_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded"); - - ret = H5Sclose(tv_ptr->full_file_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded"); - - ret = H5Sclose(tv_ptr->mem_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded"); - - ret = H5Sclose(tv_ptr->file_large_ds_sid_0); - VRFY((ret != FAIL), 
"H5Sclose(file_large_ds_sid_0) succeeded"); - - ret = H5Sclose(tv_ptr->file_large_ds_sid_1); - VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_1) succeeded"); - - ret = H5Sclose(tv_ptr->mem_large_ds_process_slice_sid); - VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded"); - - ret = H5Sclose(tv_ptr->file_large_ds_process_slice_sid); - VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded"); - - ret = H5Sclose(tv_ptr->large_ds_slice_sid); - VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded"); - - /* Close Datasets */ - ret = H5Dclose(tv_ptr->small_dataset); - VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded"); - - ret = H5Dclose(tv_ptr->large_dataset); - VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded"); - - /* close the file collectively */ - MESG("about to close file."); - ret = H5Fclose(tv_ptr->fid); - VRFY((ret != FAIL), "file close succeeded"); - - /* Free memory buffers */ - - if (tv_ptr->small_ds_buf_0 != NULL) - free(tv_ptr->small_ds_buf_0); - if (tv_ptr->small_ds_buf_1 != NULL) - free(tv_ptr->small_ds_buf_1); - if (tv_ptr->small_ds_buf_2 != NULL) - free(tv_ptr->small_ds_buf_2); - if (tv_ptr->small_ds_slice_buf != NULL) - free(tv_ptr->small_ds_slice_buf); - - if (tv_ptr->large_ds_buf_0 != NULL) - free(tv_ptr->large_ds_buf_0); - if (tv_ptr->large_ds_buf_1 != NULL) - free(tv_ptr->large_ds_buf_1); - if (tv_ptr->large_ds_buf_2 != NULL) - free(tv_ptr->large_ds_buf_2); - if (tv_ptr->large_ds_slice_buf != NULL) - free(tv_ptr->large_ds_slice_buf); - - return; - -} /* hs_dr_pio_test__takedown() */ - -/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__d2m_l2s() - * - * Purpose: Part one of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can read from disk correctly using - * selections of different rank that H5Sselect_shape_same() - * views as being of the same shape. - * - * In this function, we test this by reading small_rank - 1 - * slices from the on disk large cube, and verifying that the - * data read is correct. Verify that H5Sselect_shape_same() - * returns true on the memory and file selections. - * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0 - -static void -contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG - const char *fcnName = "contig_hs_dr_pio_test__run_test()"; -#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - bool mis_match = false; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - uint32_t expected_value; - uint32_t *ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* We have already done a H5Sselect_all() on the dataspace - * small_ds_slice_sid in the initialization phase, so no need to - * call H5Sselect_all() again. - */ - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read slices of the large cube. 
- */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* zero out the buffer we will be reading into */ - memset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); - -#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG - fprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName); -#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - - /* in serial versions of this test, we loop through all the dimensions - * of the large data set. However, in the parallel version, each - * process only works with that slice of the large cube indicated - * by its rank -- hence we set the most slowly changing index to - * mpi_rank, and don't iterate over it. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. - */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank - 1 >= 1 and that - * large_rank > small_rank by the assertions at the head - * of this function. Thus no need for another inner loop. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr, - tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); - VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded"); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. 
- */ - check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0); - VRFY((check == true), "H5Sselect_shape_same passed"); - - /* Read selection from disk */ -#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), - (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); - fprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName, - H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); -#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - ret = - H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid, - tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf); - VRFY((ret >= 0), "H5Dread() slice from large ds succeeded."); - - /* verify that expected data is retrieved */ - - mis_match = false; - ptr_1 = tv_ptr->small_ds_slice_buf; - expected_value = - (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - - for (n = 0; n < tv_ptr->small_ds_slice_size; n++) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - - *ptr_1 = 0; /* zero data for next use */ - - ptr_1++; - expected_value++; - } - - VRFY((mis_match == false), "small slice read from large ds data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* contig_hs_dr_pio_test__d2m_l2s() */ - -/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__d2m_s2l() - * - * Purpose: Part two of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can read from disk correctly using - * selections of different rank that H5Sselect_shape_same() - * views as being of the same shape. - * - * In this function, we test this by reading slices of the - * on disk small data set into slices through the in memory - * large data set, and verify that the correct data (and - * only the correct data) is read. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0 - -static void -contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG - const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()"; -#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - bool mis_match = false; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t *ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* Read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct - * data (and only the correct data) is read. - */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; - - for (i = 1; i < tv_ptr->large_rank; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); - -#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG - fprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName); -#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - - /* zero out the in memory large ds */ - memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read slices of the large cube. - */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. - * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't iterate - * over it. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. 
- */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr, - tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); - VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded"); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); - VRFY((check == true), "H5Sselect_shape_same passed"); - - /* Read selection from disk */ -#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), - (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); -#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); - VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); - - /* verify that the expected data and only the - * expected data was read. - */ - ptr_1 = tv_ptr->large_ds_buf_1; - expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - start_index = - (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= tv_ptr->large_ds_size); - - for (n = 0; n < tv_ptr->large_ds_size; n++) { - - if ((n >= start_index) && (n <= stop_index)) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - expected_value++; - } - else { - - if (*ptr_1 != 0) { - - mis_match = true; - } - } - /* zero out the value for the next pass */ - *ptr_1 = 0; - - ptr_1++; - } - - VRFY((mis_match == false), "small slice read from large ds data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* contig_hs_dr_pio_test__d2m_s2l() */ - -/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__m2d_l2s() - * - * Purpose: Part three of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. 
- * - * Verify that we can write from memory to file using - * selections of different rank that H5Sselect_shape_same() - * views as being of the same shape. - * - * Do this by writing small_rank - 1 dimensional slices from - * the in memory large data set to the on disk small cube - * dataset. After each write, read the slice of the small - * dataset back from disk, and verify that it contains - * the expected data. Verify that H5Sselect_shape_same() - * returns true on the memory and file selections. - * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0 - -static void -contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG - const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()"; -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - bool mis_match = false; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t *ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* now we go in the opposite direction, verifying that we can write - * from memory to file using selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Start by writing small_rank - 1 dimensional slices from the in memory large - * data set to the on disk small cube dataset. After each write, read the - * slice of the small dataset back from disk, and verify that it contains - * the expected data. Verify that H5Sselect_shape_same() returns true on - * the memory and file selections. - */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; - - for (i = 1; i < tv_ptr->large_rank; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read slices of the large cube. - */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* zero out the in memory small ds */ - memset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); - -#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG - fprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName); -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - - /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. 
- * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't iterate - * over it. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - j = 0; - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. - */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - /* zero out this rank's slice of the on disk small data set */ - ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2); - VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded."); - - /* select the portion of the in memory large cube from which we - * are going to write data. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr, - tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); - VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded."); - - /* verify that H5Sselect_shape_same() reports the in - * memory slice through the cube selection and the - * on disk full square selections as having the same shape. - */ - check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); - VRFY((check == true), "H5Sselect_shape_same passed."); - - /* write the slice from the in memory large data set to the - * slice of the on disk small dataset. 
*/ -#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), - (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); - VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded."); - - /* read the on disk square into memory */ - ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); - VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); - - /* verify that expected data is retrieved */ - - mis_match = false; - ptr_1 = tv_ptr->small_ds_buf_1; - - expected_value = - (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - - start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size; - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= tv_ptr->small_ds_size); - - for (n = 0; n < tv_ptr->small_ds_size; n++) { - - if ((n >= start_index) && (n <= stop_index)) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - expected_value++; - } - else { - - if (*ptr_1 != 0) { - - mis_match = true; - } - } - /* zero out the value for the next pass */ - *ptr_1 = 0; - - ptr_1++; - } - - VRFY((mis_match == false), "small slice write from large ds data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* contig_hs_dr_pio_test__m2d_l2s() */ - -/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__m2d_s2l() - * - * Purpose: Part four of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can write from memory to file using - * selections of different rank that H5Sselect_shape_same() - * views as being of the same shape. - * - * Do this by writing the contents of the process's slice of - * the in memory small data set to slices of the on disk - * large data set. After each write, read the process's - * slice of the large data set back into memory, and verify - * that it contains the expected data. - * - * Verify that H5Sselect_shape_same() returns true on the - * memory and file selections. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0 - -static void -contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG - const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()"; -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - bool mis_match = false; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t *ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After - * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5Sselect_shape_same() returns true on the memory - * and file selections. - */ - - /* select the slice of the in memory small data set associated with - * the process's mpi rank. - */ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; - - for (i = 1; i < tv_ptr->large_rank; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to write slices of the small data set to - * slices of the large data set. - */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* zero out the in memory large ds */ - memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); - -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG - fprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName); -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. 
- */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - fprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName, - (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), - (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - /* Zero out this processes slice of the on disk large data set. - * Note that this will leave one slice with its original data - * as there is one more slice than processes. - */ - ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid, - tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_2); - VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded"); - - /* select the portion of the in memory large cube to which we - * are going to write data. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr, - tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); - VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded"); - - /* verify that H5Sselect_shape_same() reports the in - * memory small data set slice selection and the - * on disk slice through the large data set selection - * as having the same shape. - */ - check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0); - VRFY((check == true), "H5Sselect_shape_same passed"); - - /* write the small data set slice from memory to the - * target slice of the disk data set - */ -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), - (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, - tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); - VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded"); - - /* read this processes slice on the on disk large - * data set into memory. 
- */ - - ret = H5Dread( - tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid, - tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); - VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded"); - - /* verify that the expected data and only the - * expected data was read. - */ - ptr_1 = tv_ptr->large_ds_buf_1; - expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - - start_index = - (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - assert(start_index < stop_index); - assert(stop_index < tv_ptr->large_ds_size); - - for (n = 0; n < tv_ptr->large_ds_size; n++) { - - if ((n >= start_index) && (n <= stop_index)) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - - expected_value++; - } - else { - - if (*ptr_1 != 0) { - - mis_match = true; - } - } - /* zero out buffer for next test */ - *ptr_1 = 0; - ptr_1++; - } - - VRFY((mis_match == false), "small ds slice write to large ds slice data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* contig_hs_dr_pio_test__m2d_s2l() */ - -/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__run_test() - * - * Purpose: Test I/O to/from hyperslab selections of different rank in - * the parallel. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 - -static void -contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size, - const int small_rank, const int large_rank, const bool use_collective_io, - const hid_t dset_type, int express_test, int *skips_ptr, int max_skips, - int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, - int mpi_rank) -{ -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - const char *fcnName = "contig_hs_dr_pio_test__run_test()"; -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - struct hs_dr_pio_test_vars_t test_vars = { - /* int mpi_size = */ -1, - /* int mpi_rank = */ -1, - /* MPI_Comm mpi_comm = */ MPI_COMM_NULL, - /* MPI_Inf mpi_info = */ MPI_INFO_NULL, - /* int test_num = */ -1, - /* int edge_size = */ -1, - /* int checker_edge_size = */ -1, - /* int chunk_edge_size = */ -1, - /* int small_rank = */ -1, - /* int large_rank = */ -1, - /* hid_t dset_type = */ -1, - /* uint32_t * small_ds_buf_0 = */ NULL, - /* uint32_t * small_ds_buf_1 = */ NULL, - /* uint32_t * small_ds_buf_2 = */ NULL, - /* uint32_t * small_ds_slice_buf = */ NULL, - /* uint32_t * large_ds_buf_0 = */ NULL, - /* uint32_t * large_ds_buf_1 = */ NULL, - /* uint32_t * large_ds_buf_2 = */ NULL, - /* uint32_t * large_ds_slice_buf = */ NULL, - /* int small_ds_offset = */ -1, - /* int large_ds_offset = */ -1, - /* hid_t fid = */ -1, /* HDF5 file ID */ - /* hid_t xfer_plist = */ H5P_DEFAULT, - /* hid_t full_mem_small_ds_sid = */ -1, - /* hid_t full_file_small_ds_sid = */ -1, - /* hid_t mem_small_ds_sid = */ -1, - /* hid_t file_small_ds_sid_0 = */ -1, - /* hid_t file_small_ds_sid_1 = */ -1, - /* hid_t small_ds_slice_sid = */ -1, - /* hid_t full_mem_large_ds_sid = */ -1, - /* hid_t full_file_large_ds_sid = */ -1, - /* hid_t mem_large_ds_sid = */ -1, - /* hid_t file_large_ds_sid_0 = */ -1, - /* hid_t file_large_ds_sid_1 = */ -1, - /* hid_t file_large_ds_process_slice_sid = */ -1, - /* hid_t mem_large_ds_process_slice_sid = */ -1, - /* hid_t large_ds_slice_sid = */ -1, - /* hid_t small_dataset = */ -1, /* Dataset ID */ - /* hid_t large_dataset = */ -1, /* Dataset ID */ - /* size_t small_ds_size = */ 1, - /* size_t small_ds_slice_size = */ 1, - /* size_t large_ds_size = */ 1, - /* size_t large_ds_slice_size = */ 1, - /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t * start_ptr = */ NULL, - /* hsize_t * stride_ptr = */ NULL, - /* hsize_t * count_ptr = */ NULL, - /* hsize_t * block_ptr = */ NULL, - /* int skips = */ 0, - /* int max_skips = */ 0, - /* int64_t total_tests = */ 0, - /* int64_t tests_run = */ 0, - /* int64_t tests_skipped = */ 0}; - struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; - - if (MAINPROCESS) - printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), - small_rank, large_rank); - - hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io, - dset_type, express_test, tv_ptr); - - /* initialize skips & max_skips */ - tv_ptr->skips = *skips_ptr; - tv_ptr->max_skips = max_skips; - -#if 
CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank); - fprintf(stdout, "test %d: Initialization complete.\n", test_num); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - - /* first, verify that we can read from disk correctly using selections - * of different rank that H5Sselect_shape_same() views as being of the - * same shape. - * - * Start by reading small_rank - 1 dimensional slice from the on disk - * large cube, and verifying that the data read is correct. Verify that - * H5Sselect_shape_same() returns true on the memory and file selections. - */ - -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - contig_hs_dr_pio_test__d2m_l2s(tv_ptr); - - /* Second, read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct - * data (and only the correct data) is read. - */ - -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - contig_hs_dr_pio_test__d2m_s2l(tv_ptr); - - /* now we go in the opposite direction, verifying that we can write - * from memory to file using selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Start by writing small_rank - 1 D slices from the in memory large data - * set to the on disk small cube dataset. After each write, read the - * slice of the small dataset back from disk, and verify that it contains - * the expected data. Verify that H5Sselect_shape_same() returns true on - * the memory and file selections. - */ - -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - contig_hs_dr_pio_test__m2d_l2s(tv_ptr); - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After - * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5Sselect_shape_same() returns true on the memory - * and file selections. 
- */ - -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - contig_hs_dr_pio_test__m2d_s2l(tv_ptr); - -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", test_num, - (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), - (long long)(tv_ptr->total_tests)); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - - hs_dr_pio_test__takedown(tv_ptr); - -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: Takedown complete.\n", test_num); - } -#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - - *skips_ptr = tv_ptr->skips; - *total_tests_ptr += tv_ptr->total_tests; - *tests_run_ptr += tv_ptr->tests_run; - *tests_skipped_ptr += tv_ptr->tests_skipped; - - return; - -} /* contig_hs_dr_pio_test__run_test() */ - -/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) - * - * Purpose: Test I/O to/from hyperslab selections of different rank in - * the parallel case. - * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CONTIG_HS_DR_PIO_TEST__DEBUG 0 - -static void -contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) -{ - int express_test; - int local_express_test; - int mpi_rank = -1; - int mpi_size; - int test_num = 0; - int edge_size; - int chunk_edge_size = 0; - int small_rank; - int large_rank; - int mpi_result; - int skips = 0; - int max_skips = 0; - /* The following table list the number of sub-tests skipped between - * each test that is actually executed as a function of the express - * test level. Note that any value in excess of 4880 will cause all - * sub tests to be skipped. - */ - int max_skips_tbl[4] = {0, 4, 64, 1024}; - hid_t dset_type = H5T_NATIVE_UINT; - int64_t total_tests = 0; - int64_t tests_run = 0; - int64_t tests_skipped = 0; - - HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - edge_size = (mpi_size > 6 ? 
mpi_size : 6); - - local_express_test = EXPRESS_MODE; /* GetTestExpress(); */ - - mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX, - MPI_COMM_WORLD); - - VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded"); - - if (local_express_test < 0) { - max_skips = max_skips_tbl[0]; - } - else if (local_express_test > 3) { - max_skips = max_skips_tbl[3]; - } - else { - max_skips = max_skips_tbl[local_express_test]; - } - - for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) { - - for (small_rank = 2; small_rank < large_rank; small_rank++) { - - switch (sstest_type) { - case IND_CONTIG: - /* contiguous data set, independent I/O */ - chunk_edge_size = 0; - - contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, - express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case IND_CONTIG */ - - case COL_CONTIG: - /* contiguous data set, collective I/O */ - chunk_edge_size = 0; - - contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, - express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case COL_CONTIG */ - - case IND_CHUNKED: - /* chunked data set, independent I/O */ - chunk_edge_size = 5; - - contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, - express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case IND_CHUNKED */ - - case COL_CHUNKED: - /* chunked data set, collective I/O */ - chunk_edge_size = 5; - - contig_hs_dr_pio_test__run_test( - test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, - express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case COL_CHUNKED */ - - default: - VRFY((false), "unknown test type"); - break; - - } /* end of switch(sstest_type) */ -#if CONTIG_HS_DR_PIO_TEST__DEBUG - if ((MAINPROCESS) && (tests_skipped > 0)) { - fprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped, - total_tests); - } -#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */ - } - } - - if (MAINPROCESS) { - if (tests_skipped > 0) { - fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", - tests_skipped, total_tests); - } - else - printf("\n"); - } - - return; - -} /* contig_hs_dr_pio_test() */ - -/**************************************************************** -** -** ckrbrd_hs_dr_pio_test__slct_ckrbrd(): -** Given a dataspace of tgt_rank, and dimensions: -** -** (mpi_size + 1), edge_size, ... , edge_size -** -** edge_size, and a checker_edge_size, select a checker -** board selection of a sel_rank (sel_rank < tgt_rank) -** dimensional slice through the dataspace parallel to the -** sel_rank fastest changing indices, with origin (in the -** higher indices) as indicated by the start array. -** -** Note that this function, like all its relatives, is -** hard coded to presume a maximum dataspace rank of 5. -** While this maximum is declared as a constant, increasing -** it will require extensive coding in addition to changing -** the value of the constant. 
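The routine described above builds its checker board by starting with one H5S_SELECT_SET hyperslab and OR-ing in further hyperslabs whose start offsets alternate between 0 and checker_edge_size in the selected dimensions, then AND-ing the union back against the dataspace extent. A compact rank-2 sketch of the same idea follows; the function name and sizes are invented for illustration.

#include <assert.h>
#include "hdf5.h"

/* Illustrative only: an 8 x 8 dataspace with a checker edge of 2.  The
 * checkers whose block indices sum to an even number are selected, which
 * covers exactly half of the extent.
 */
static void
checkerboard_2d_sketch(void)
{
    hsize_t dims[2]   = {8, 8};
    hsize_t stride[2] = {4, 4}; /* 2 * checker_edge_size */
    hsize_t count[2]  = {2, 2};
    hsize_t block[2]  = {2, 2}; /* checker_edge_size */
    hsize_t start[2]  = {0, 0};
    hid_t   sid       = H5Screate_simple(2, dims, NULL);

    /* checkers whose origin is at offset 0 in both dimensions */
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    /* checkers offset by checker_edge_size in both dimensions */
    start[0] = 2;
    start[1] = 2;
    H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);

    /* no clipping pass is needed here since nothing runs past the extent */
    assert(H5Sget_select_npoints(sid) == 32);

    H5Sclose(sid);
}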
-** -** JRM -- 10/8/09 -** -****************************************************************/ - -#define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0 - -static void -ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank, - const int edge_size, const int checker_edge_size, const int sel_rank, - hsize_t sel_start[]) -{ -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():"; -#endif - bool first_selection = true; - int i, j, k, l, m; - int n_cube_offset; - int sel_offset; - const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */ - /* this changes */ - hsize_t base_count; - hsize_t offset_count; - hsize_t start[PAR_SS_DR_MAX_RANK]; - hsize_t stride[PAR_SS_DR_MAX_RANK]; - hsize_t count[PAR_SS_DR_MAX_RANK]; - hsize_t block[PAR_SS_DR_MAX_RANK]; - herr_t ret; /* Generic return value */ - - assert(edge_size >= 6); - assert(0 < checker_edge_size); - assert(checker_edge_size <= edge_size); - assert(0 < sel_rank); - assert(sel_rank <= tgt_rank); - assert(tgt_rank <= test_max_rank); - assert(test_max_rank <= PAR_SS_DR_MAX_RANK); - - sel_offset = test_max_rank - sel_rank; - assert(sel_offset >= 0); - - n_cube_offset = test_max_rank - tgt_rank; - assert(n_cube_offset >= 0); - assert(n_cube_offset <= sel_offset); - -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - fprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size, - checker_edge_size); - fprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset); - fprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset); -#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - /* First, compute the base count (which assumes start == 0 - * for the associated offset) and offset_count (which - * assumes start == checker_edge_size for the associated - * offset). - * - * Note that the following computation depends on the C99 - * requirement that integer division discard any fraction - * (truncation towards zero) to function correctly. As we - * now require C99, this shouldn't be a problem, but noting - * it may save us some pain if we are ever obliged to support - * pre-C99 compilers again. - */ - - base_count = (hsize_t)(edge_size / (checker_edge_size * 2)); - - if ((edge_size % (checker_edge_size * 2)) > 0) { - - base_count++; - } - - offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2)); - - if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) { - - offset_count++; - } - - /* Now set up the stride and block arrays, and portions of the start - * and count arrays that will not be altered during the selection of - * the checker board. 
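To make the arithmetic concrete with invented numbers: for an edge_size of 10 and a checker_edge_size of 3, the period of the pattern is 2 * 3 = 6, so base_count = 10 / 6 = 1 plus 1 for the remainder, i.e. 2 (checkers starting at offsets 0 and 6), and offset_count = (10 - 3) / 6 = 1 plus 1 for the remainder, i.e. 2 (checkers starting at offsets 3 and 9, the last of which is later clipped to a single element by the H5S_SELECT_AND pass).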
- */ - i = 0; - while (i < n_cube_offset) { - - /* these values should never be used */ - start[i] = 0; - stride[i] = 0; - count[i] = 0; - block[i] = 0; - - i++; - } - - while (i < sel_offset) { - - start[i] = sel_start[i]; - stride[i] = (hsize_t)(2 * edge_size); - count[i] = 1; - block[i] = 1; - - i++; - } - - while (i < test_max_rank) { - - stride[i] = (hsize_t)(2 * checker_edge_size); - block[i] = (hsize_t)checker_edge_size; - - i++; - } - - i = 0; - do { - if (0 >= sel_offset) { - - if (i == 0) { - - start[0] = 0; - count[0] = base_count; - } - else { - - start[0] = (hsize_t)checker_edge_size; - count[0] = offset_count; - } - } - - j = 0; - do { - if (1 >= sel_offset) { - - if (j == 0) { - - start[1] = 0; - count[1] = base_count; - } - else { - - start[1] = (hsize_t)checker_edge_size; - count[1] = offset_count; - } - } - - k = 0; - do { - if (2 >= sel_offset) { - - if (k == 0) { - - start[2] = 0; - count[2] = base_count; - } - else { - - start[2] = (hsize_t)checker_edge_size; - count[2] = offset_count; - } - } - - l = 0; - do { - if (3 >= sel_offset) { - - if (l == 0) { - - start[3] = 0; - count[3] = base_count; - } - else { - - start[3] = (hsize_t)checker_edge_size; - count[3] = offset_count; - } - } - - m = 0; - do { - if (4 >= sel_offset) { - - if (m == 0) { - - start[4] = 0; - count[4] = base_count; - } - else { - - start[4] = (hsize_t)checker_edge_size; - count[4] = offset_count; - } - } - - if (((i + j + k + l + m) % 2) == 0) { - -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - fprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank, - (int)first_selection); - fprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j, k, - l, m); - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)start[0], (int)start[1], (int)start[2], (int)start[3], - (int)start[4]); - fprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3], - (int)stride[4]); - fprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)count[0], (int)count[1], (int)count[2], (int)count[3], - (int)count[4]); - fprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)block[0], (int)block[1], (int)block[2], (int)block[3], - (int)block[4]); - fprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank, - H5Sget_simple_extent_ndims(tgt_sid)); - fprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank); -#endif - - if (first_selection) { - - first_selection = false; - - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]), - &(stride[n_cube_offset]), &(count[n_cube_offset]), - &(block[n_cube_offset])); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded"); - } - else { - - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]), - &(stride[n_cube_offset]), &(count[n_cube_offset]), - &(block[n_cube_offset])); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); - } - } - - m++; - - } while ((m <= 1) && (4 >= sel_offset)); - - l++; - - } while ((l <= 1) && (3 >= sel_offset)); - - k++; - - } while ((k <= 1) && (2 >= sel_offset)); - - j++; - - } while ((j <= 1) && (1 >= sel_offset)); - - i++; - - } while ((i <= 1) && (0 >= sel_offset)); - -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(tgt_sid)); -#endif /* 
CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - /* Clip the selection back to the dataspace proper. */ - - for (i = 0; i < test_max_rank; i++) { - - start[i] = 0; - stride[i] = (hsize_t)edge_size; - count[i] = 1; - block[i] = (hsize_t)edge_size; - } - - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded"); - -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(tgt_sid)); - fprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank); -#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - return; - -} /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */ - -/**************************************************************** -** -** ckrbrd_hs_dr_pio_test__verify_data(): -** -** Examine the supplied buffer to see if it contains the -** expected data. Return true if it does, and false -** otherwise. -** -** The supplied buffer is presumed to this process's slice -** of the target data set. Each such slice will be an -** n-cube of rank (rank -1) and the supplied edge_size with -** origin (mpi_rank, 0, ... , 0) in the target data set. -** -** Further, the buffer is presumed to be the result of reading -** or writing a checker board selection of an m (1 <= m < -** rank) dimensional slice through this processes slice -** of the target data set. Also, this slice must be parallel -** to the fastest changing indices. -** -** It is further presumed that the buffer was zeroed before -** the read/write, and that the full target data set (i.e. -** the buffer/data set for all processes) was initialized -** with the natural numbers listed in order from the origin -** along the fastest changing axis. -** -** Thus for a 20x10x10 dataset, the value stored in location -** (x, y, z) (assuming that z is the fastest changing index -** and x the slowest) is assumed to be: -** -** (10 * 10 * x) + (10 * y) + z -** -** Further, supposing that this is process 10, this process's -** slice of the dataset would be a 10 x 10 2-cube with origin -** (10, 0, 0) in the data set, and would be initialize (prior -** to the checkerboard selection) as follows: -** -** 1000, 1001, 1002, ... 1008, 1009 -** 1010, 1011, 1012, ... 1018, 1019 -** . . . . . -** . . . . . -** . . . . . -** 1090, 1091, 1092, ... 1098, 1099 -** -** In the case of a read from the processors slice of another -** data set of different rank, the values expected will have -** to be adjusted accordingly. This is done via the -** first_expected_val parameter. -** -** Finally, the function presumes that the first element -** of the buffer resides either at the origin of either -** a selected or an unselected checker. (Translation: -** if partial checkers appear in the buffer, they will -** intersect the edges of the n-cube opposite the origin.) 
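A rank-1 sketch of the verification walk described above may be easier to follow than the fully nested version: the expected value advances for every element of the buffer, but only elements that fall inside a selected checker must match it, while everything else must still be zero. The helper below is illustrative only (invented name, and it does not zero the buffer for reuse); the real routine nests this walk once per dimension.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: rank-1 version of the checker board verification walk */
static bool
verify_1d_checker_sketch(const uint32_t *buf, int edge_size, int checker_edge_size,
                         uint32_t first_expected_val, bool starts_in_checker)
{
    bool     in_checker = starts_in_checker;
    uint32_t expected   = first_expected_val;
    int      z          = 0;

    for (int m = 0; m < edge_size; m++, z++, expected++) {

        if (z >= checker_edge_size) { /* crossed into the next checker */
            in_checker = !in_checker;
            z = 0;
        }

        if (in_checker ? (buf[m] != expected) : (buf[m] != 0))
            return false;
    }

    return true;
}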
-** -****************************************************************/ - -#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0 - -static bool -ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size, - const int checker_edge_size, uint32_t first_expected_val, - bool buf_starts_in_checker) -{ -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():"; -#endif - bool good_data = true; - bool in_checker; - bool start_in_checker[5]; - uint32_t expected_value; - uint32_t *val_ptr; - int i, j, k, l, m; /* to track position in n-cube */ - int v, w, x, y, z; /* to track position in checker */ - const int test_max_rank = 5; /* code changes needed if this is increased */ - - assert(buf_ptr != NULL); - assert(0 < rank); - assert(rank <= test_max_rank); - assert(edge_size >= 6); - assert(0 < checker_edge_size); - assert(checker_edge_size <= edge_size); - assert(test_max_rank <= PAR_SS_DR_MAX_RANK); - -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - fprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank); - fprintf(stdout, "%s rank = %d.\n", fcnName, rank); - fprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size); - fprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size); - fprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val); - fprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker); -} -#endif - -val_ptr = buf_ptr; -expected_value = first_expected_val; - -i = 0; -v = 0; -start_in_checker[0] = buf_starts_in_checker; -do { - if (v >= checker_edge_size) { - - start_in_checker[0] = !start_in_checker[0]; - v = 0; - } - - j = 0; - w = 0; - start_in_checker[1] = start_in_checker[0]; - do { - if (w >= checker_edge_size) { - - start_in_checker[1] = !start_in_checker[1]; - w = 0; - } - - k = 0; - x = 0; - start_in_checker[2] = start_in_checker[1]; - do { - if (x >= checker_edge_size) { - - start_in_checker[2] = !start_in_checker[2]; - x = 0; - } - - l = 0; - y = 0; - start_in_checker[3] = start_in_checker[2]; - do { - if (y >= checker_edge_size) { - - start_in_checker[3] = !start_in_checker[3]; - y = 0; - } - - m = 0; - z = 0; -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - fprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); -#endif - in_checker = start_in_checker[3]; - do { -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - fprintf(stdout, " %d", (int)(*val_ptr)); -#endif - if (z >= checker_edge_size) { - - in_checker = !in_checker; - z = 0; - } - - if (in_checker) { - - if (*val_ptr != expected_value) { - - good_data = false; - } - - /* zero out buffer for reuse */ - *val_ptr = 0; - } - else if (*val_ptr != 0) { - - good_data = false; - - /* zero out buffer for reuse */ - *val_ptr = 0; - } - - val_ptr++; - expected_value++; - m++; - z++; - - } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - fprintf(stdout, "\n"); -#endif - l++; - y++; - } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); - k++; - x++; - } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); - j++; - w++; - } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); - i++; - v++; -} while ((rank >= test_max_rank) && (i < edge_size)); - -return (good_data); - -} /* ckrbrd_hs_dr_pio_test__verify_data() */ - -/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__d2m_l2s() - * - * 
Purpose: Part one of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can read from disk correctly using checker - * board selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * In this function, we test this by reading small_rank - 1 - * checker board slices from the on disk large cube, and - * verifying that the data read is correct. Verify that - * H5Sselect_shape_same() returns true on the memory and - * file selections. - * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0 - -static void -ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()"; - uint32_t *ptr_0; -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - bool data_ok = false; - int i, j, k, l; - uint32_t expected_value; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* first, verify that we can read from disk correctly using selections - * of different rank that H5Sselect_shape_same() views as being of the - * same shape. - * - * Start by reading a (small_rank - 1)-D checker board slice from this - * processes slice of the on disk large data set, and verifying that the - * data read is correct. Verify that H5Sselect_shape_same() returns - * true on the memory and file selections. - * - * The first step is to set up the needed checker board selection in the - * in memory small small cube - */ - - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1, - tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, - sel_start); - - /* zero out the buffer we will be reading into */ - memset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); - -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - fprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank); - ptr_0 = tv_ptr->small_ds_slice_buf; - for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) { - fprintf(stdout, "%d ", (int)(*ptr_0)); - ptr_0++; - } - fprintf(stdout, "\n"); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read slices of the large cube. - */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - fprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName, - tv_ptr->mpi_rank); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - /* in serial versions of this test, we loop through all the dimensions - * of the large data set. 
However, in the parallel version, each - * process only works with that slice of the large cube indicated - * by its rank -- hence we set the most slowly changing index to - * mpi_rank, and don't iterate over it. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. - */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank - 1 >= 1 and that - * large_rank > small_rank by the assertions at the head - * of this function. Thus no need for another inner loop. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd( - tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size, - tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. 
- */ - check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0); - VRFY((check == true), "H5Sselect_shape_same passed"); - - /* Read selection from disk */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], - tv_ptr->start[4]); - fprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName, - H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - - ret = - H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid, - tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf); - VRFY((ret >= 0), "H5Dread() slice from large ds succeeded."); - -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - fprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - - /* verify that expected data is retrieved */ - - expected_value = - (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - - data_ok = ckrbrd_hs_dr_pio_test__verify_data( - tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size, - tv_ptr->checker_edge_size, expected_value, (bool)true); - - VRFY((data_ok == true), "small slice read from large ds data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* ckrbrd_hs_dr_pio_test__d2m_l2s() */ - -/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__d2m_s2l() - * - * Purpose: Part two of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can read from disk correctly using - * selections of different rank that H5Sselect_shape_same() - * views as being of the same shape. - * - * In this function, we test this by reading checker board - * slices of the on disk small data set into slices through - * the in memory large data set, and verify that the correct - * data (and only the correct data) is read. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0 - -static void -ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()"; -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - bool data_ok = false; - int i, j, k, l; - size_t u; - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t *ptr_1; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* similarly, read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct - * data (and only the correct data) is read. - */ - - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank, - tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, - sel_start); - -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG - fprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - - /* zero out the buffer we will be reading into */ - memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read the slice of the small data set - * into different slices of the process slice of the large data - * set. - */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. - * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't iterate - * over it. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. 
- */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd( - tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size, - tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. - */ - check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); - VRFY((check == true), "H5Sselect_shape_same passed"); - - /* Read selection from disk */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], - tv_ptr->start[4]); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); - VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); - - /* verify that the expected data and only the - * expected data was read. 
- */ - data_ok = true; - ptr_1 = tv_ptr->large_ds_buf_1; - expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - start_index = - (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG - { - int m, n; - - fprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank, - expected_value); - fprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - start_index, stop_index); - n = 0; - for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) { - fprintf(stdout, "%d ", (int)(*ptr_1)); - ptr_1++; - n++; - if (n >= tv_ptr->edge_size) { - fprintf(stdout, "\n"); - n = 0; - } - } - fprintf(stdout, "\n"); - ptr_1 = tv_ptr->large_ds_buf_1; - } -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - - assert(start_index < stop_index); - assert(stop_index <= tv_ptr->large_ds_size); - - for (u = 0; u < start_index; u++) { - - if (*ptr_1 != 0) { - - data_ok = false; - } - - /* zero out the value for the next pass */ - *ptr_1 = 0; - - ptr_1++; - } - - VRFY((data_ok == true), "slice read from small to large ds data good(1)."); - - data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1, - tv_ptr->edge_size, tv_ptr->checker_edge_size, - expected_value, (bool)true); - - VRFY((data_ok == true), "slice read from small to large ds data good(2)."); - - ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1; - - for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) { - - if (*ptr_1 != 0) { - - data_ok = false; - } - - /* zero out the value for the next pass */ - *ptr_1 = 0; - - ptr_1++; - } - - VRFY((data_ok == true), "slice read from small to large ds data good(3)."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* ckrbrd_hs_dr_pio_test__d2m_s2l() */ - -/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__m2d_l2s() - * - * Purpose: Part three of a series of tests of I/O to/from checker - * board hyperslab selections of different rank in the - * parallel. - * - * Verify that we can write from memory to file using checker - * board selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Do this by writing small_rank - 1 dimensional checker - * board slices from the in memory large data set to the on - * disk small cube dataset. After each write, read the - * slice of the small dataset back from disk, and verify - * that it contains the expected data. Verify that - * H5Sselect_shape_same() returns true on the memory and - * file selections. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0 - -static void -ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()"; -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - bool data_ok = false; - int i, j, k, l; - size_t u; - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t *ptr_1; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* now we go in the opposite direction, verifying that we can write - * from memory to file using selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Start by writing small_rank - 1 D slices from the in memory large data - * set to the on disk small dataset. After each write, read the slice of - * the small dataset back from disk, and verify that it contains the - * expected data. Verify that H5Sselect_shape_same() returns true on - * the memory and file selections. - */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; - - for (i = 1; i < tv_ptr->large_rank; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); - - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank, - tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, - sel_start); - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to read slices of the large cube. - */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* zero out the in memory small ds */ - memset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); - -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG - fprintf(stdout, - "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n", - fcnName); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - - /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. 
- * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't iterate - * over it. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - j = 0; - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. - */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - /* zero out this rank's slice of the on disk small data set */ - ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2); - VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded."); - - /* select the portion of the in memory large cube from which we - * are going to write data. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd( - tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size, - tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); - - /* verify that H5Sselect_shape_same() reports the in - * memory checkerboard selection of the slice through the - * large dataset and the checkerboard selection of the process - * slice of the small data set as having the same shape. - */ - check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid); - VRFY((check == true), "H5Sselect_shape_same passed."); - - /* write the checker board selection of the slice from the in - * memory large data set to the slice of the on disk small - * dataset. 
- */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], - tv_ptr->start[4]); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1)); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); - VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded."); - - /* read the on disk process slice of the small dataset into memory */ - ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); - VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); - - /* verify that expected data is retrieved */ - - expected_value = - (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - - start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size; - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - assert(start_index < stop_index); - assert(stop_index <= tv_ptr->small_ds_size); - - data_ok = true; - - ptr_1 = tv_ptr->small_ds_buf_1; - for (u = 0; u < start_index; u++, ptr_1++) { - - if (*ptr_1 != 0) { - - data_ok = false; - *ptr_1 = 0; - } - } - - data_ok &= ckrbrd_hs_dr_pio_test__verify_data( - tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size, - tv_ptr->checker_edge_size, expected_value, (bool)true); - - ptr_1 = tv_ptr->small_ds_buf_1; - for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) { - - if (*ptr_1 != 0) { - - data_ok = false; - *ptr_1 = 0; - } - } - - VRFY((data_ok == true), "large slice write slice to small slice data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* ckrbrd_hs_dr_pio_test__m2d_l2s() */ - -/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__m2d_s2l() - * - * Purpose: Part four of a series of tests of I/O to/from checker - * board hyperslab selections of different rank in the parallel. - * - * Verify that we can write from memory to file using - * selections of different rank that H5Sselect_shape_same() - * views as being of the same shape. - * - * Do this by writing checker board selections of the contents - * of the process's slice of the in memory small data set to - * slices of the on disk large data set. After each write, - * read the process's slice of the large data set back into - * memory, and verify that it contains the expected data. - * - * Verify that H5Sselect_shape_same() returns true on the - * memory and file selections. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0 - -static void -ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) -{ -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()"; -#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - bool data_ok = false; - int i, j, k, l; - size_t u; - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t *ptr_1; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After - * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5Sselect_shape_same() returns true on the memory - * and file selections. - */ - - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; - - for (i = 1; i < tv_ptr->large_rank; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded"); - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, - tv_ptr->count, tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded"); - - /* setup a checkerboard selection of the slice of the in memory small - * data set associated with the process's mpi rank. - */ - - sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank, - tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, - sel_start); - - /* set up start, stride, count, and block -- note that we will - * change start[] so as to write checkerboard selections of slices - * of the small data set to slices of the large data set. 
- */ - for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - - tv_ptr->start[i] = 0; - tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { - - tv_ptr->block[i] = 1; - } - else { - - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); - } - } - - /* zero out the in memory large ds */ - memset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); - -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG - fprintf(stdout, - "%s writing process checkerboard selections of slices of small ds to process slices of large " - "ds on disk.\n", - fcnName); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { - - i = tv_ptr->mpi_rank; - } - else { - - i = 0; - } - - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to - * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the - * test. - */ - - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { - - j = tv_ptr->mpi_rank; - } - else { - - j = 0; - } - - do { - if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { - - k = tv_ptr->mpi_rank; - } - else { - - k = 0; - } - - do { - /* since small rank >= 2 and large_rank > small_rank, we - * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 - * (baring major re-orgaization), this gives us: - * - * (PAR_SS_DR_MAX_RANK - large_rank) <= 2 - * - * so no need to repeat the test in the outer loops -- - * just set l = 0. - */ - - l = 0; - do { - if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - - (tv_ptr->tests_skipped)++; - } - else { /* run the test */ - - tv_ptr->skips = 0; /* reset the skips counter */ - - /* we know that small_rank >= 1 and that large_rank > small_rank - * by the assertions at the head of this function. Thus no - * need for another inner loop. - */ - - /* Zero out this processes slice of the on disk large data set. - * Note that this will leave one slice with its original data - * as there is one more slice than processes. - */ - ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, - tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2); - VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded"); - - /* select the portion of the in memory large cube to which we - * are going to write data. - */ - tv_ptr->start[0] = (hsize_t)i; - tv_ptr->start[1] = (hsize_t)j; - tv_ptr->start[2] = (hsize_t)k; - tv_ptr->start[3] = (hsize_t)l; - tv_ptr->start[4] = 0; - - assert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); - assert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd( - tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size, - tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); - - /* verify that H5Sselect_shape_same() reports the in - * memory small data set slice selection and the - * on disk slice through the large data set selection - * as having the same shape. 
- */ - check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1); - VRFY((check == true), "H5Sselect_shape_same passed"); - - /* write the small data set slice from memory to the - * target slice of the disk data set - */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], - tv_ptr->start[4]); - fprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, - H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), - H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1)); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, - tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); - VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded"); - - /* read this processes slice on the on disk large - * data set into memory. - */ - - ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, - tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); - VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded"); - - /* verify that the expected data and only the - * expected data was read. - */ - expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - - start_index = - (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - assert(start_index < stop_index); - assert(stop_index < tv_ptr->large_ds_size); - - data_ok = true; - - ptr_1 = tv_ptr->large_ds_buf_1; - for (u = 0; u < start_index; u++, ptr_1++) { - - if (*ptr_1 != 0) { - - data_ok = false; - *ptr_1 = 0; - } - } - - data_ok &= ckrbrd_hs_dr_pio_test__verify_data( - tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size, - tv_ptr->checker_edge_size, expected_value, (bool)true); - - ptr_1 = tv_ptr->large_ds_buf_1; - for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) { - - if (*ptr_1 != 0) { - - data_ok = false; - *ptr_1 = 0; - } - } - - VRFY((data_ok == true), "small ds cb slice write to large ds slice data good."); - - (tv_ptr->tests_run)++; - } - - l++; - - (tv_ptr->total_tests)++; - - } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); - k++; - } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); - j++; - } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); - - return; - -} /* ckrbrd_hs_dr_pio_test__m2d_s2l() */ - -/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__run_test() - * - * Purpose: Test I/O to/from checkerboard selections of hyperslabs of - * different rank in the parallel. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -#define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 - -static void -ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size, - const int chunk_edge_size, const int small_rank, const int large_rank, - const bool use_collective_io, const hid_t dset_type, const int express_test, - int *skips_ptr, int max_skips, int64_t *total_tests_ptr, - int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank) - -{ -#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()"; -#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - struct hs_dr_pio_test_vars_t test_vars = { - /* int mpi_size = */ -1, - /* int mpi_rank = */ -1, - /* MPI_Comm mpi_comm = */ MPI_COMM_NULL, - /* MPI_Inf mpi_info = */ MPI_INFO_NULL, - /* int test_num = */ -1, - /* int edge_size = */ -1, - /* int checker_edge_size = */ -1, - /* int chunk_edge_size = */ -1, - /* int small_rank = */ -1, - /* int large_rank = */ -1, - /* hid_t dset_type = */ -1, - /* uint32_t * small_ds_buf_0 = */ NULL, - /* uint32_t * small_ds_buf_1 = */ NULL, - /* uint32_t * small_ds_buf_2 = */ NULL, - /* uint32_t * small_ds_slice_buf = */ NULL, - /* uint32_t * large_ds_buf_0 = */ NULL, - /* uint32_t * large_ds_buf_1 = */ NULL, - /* uint32_t * large_ds_buf_2 = */ NULL, - /* uint32_t * large_ds_slice_buf = */ NULL, - /* int small_ds_offset = */ -1, - /* int large_ds_offset = */ -1, - /* hid_t fid = */ -1, /* HDF5 file ID */ - /* hid_t xfer_plist = */ H5P_DEFAULT, - /* hid_t full_mem_small_ds_sid = */ -1, - /* hid_t full_file_small_ds_sid = */ -1, - /* hid_t mem_small_ds_sid = */ -1, - /* hid_t file_small_ds_sid_0 = */ -1, - /* hid_t file_small_ds_sid_1 = */ -1, - /* hid_t small_ds_slice_sid = */ -1, - /* hid_t full_mem_large_ds_sid = */ -1, - /* hid_t full_file_large_ds_sid = */ -1, - /* hid_t mem_large_ds_sid = */ -1, - /* hid_t file_large_ds_sid_0 = */ -1, - /* hid_t file_large_ds_sid_1 = */ -1, - /* hid_t file_large_ds_process_slice_sid = */ -1, - /* hid_t mem_large_ds_process_slice_sid = */ -1, - /* hid_t large_ds_slice_sid = */ -1, - /* hid_t small_dataset = */ -1, /* Dataset ID */ - /* hid_t large_dataset = */ -1, /* Dataset ID */ - /* size_t small_ds_size = */ 1, - /* size_t small_ds_slice_size = */ 1, - /* size_t large_ds_size = */ 1, - /* size_t large_ds_slice_size = */ 1, - /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, - /* hsize_t * start_ptr = */ NULL, - /* hsize_t * stride_ptr = */ NULL, - /* hsize_t * count_ptr = */ NULL, - /* hsize_t * block_ptr = */ NULL, - /* int skips = */ 0, - /* int max_skips = */ 0, - /* int64_t total_tests = */ 0, - /* int64_t tests_run = */ 0, - /* int64_t tests_skipped = */ 0}; - struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; - - if (MAINPROCESS) - printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), - small_rank, large_rank); - - hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, - use_collective_io, dset_type, express_test, tv_ptr); - - /* initialize skips & max_skips */ - tv_ptr->skips = *skips_ptr; - 
tv_ptr->max_skips = max_skips; - -#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank); - fprintf(stdout, "test %d: Initialization complete.\n", test_num); - } -#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - - /* first, verify that we can read from disk correctly using selections - * of different rank that H5Sselect_shape_same() views as being of the - * same shape. - * - * Start by reading a (small_rank - 1)-D slice from this processes slice - * of the on disk large data set, and verifying that the data read is - * correct. Verify that H5Sselect_shape_same() returns true on the - * memory and file selections. - * - * The first step is to set up the needed checker board selection in the - * in memory small small cube - */ - - ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr); - - /* similarly, read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct - * data (and only the correct data) is read. - */ - - ckrbrd_hs_dr_pio_test__d2m_s2l(tv_ptr); - - /* now we go in the opposite direction, verifying that we can write - * from memory to file using selections of different rank that - * H5Sselect_shape_same() views as being of the same shape. - * - * Start by writing small_rank - 1 D slices from the in memory large data - * set to the on disk small dataset. After each write, read the slice of - * the small dataset back from disk, and verify that it contains the - * expected data. Verify that H5Sselect_shape_same() returns true on - * the memory and file selections. - */ - - ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr); - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After - * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5Sselect_shape_same() returns true on the memory - * and file selections. - */ - - ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr); - -#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", test_num, - (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), - (long long)(tv_ptr->total_tests)); - } -#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - - hs_dr_pio_test__takedown(tv_ptr); - -#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if (MAINPROCESS) { - fprintf(stdout, "test %d: Takedown complete.\n", test_num); - } -#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - - *skips_ptr = tv_ptr->skips; - *total_tests_ptr += tv_ptr->total_tests; - *tests_run_ptr += tv_ptr->tests_run; - *tests_skipped_ptr += tv_ptr->tests_skipped; - - return; - -} /* ckrbrd_hs_dr_pio_test__run_test() */ - -/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test() - * - * Purpose: Test I/O to/from hyperslab selections of different rank in - * the parallel case. 
- * - * Return: void - * - *------------------------------------------------------------------------- - */ - -static void -ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) -{ - int express_test; - int local_express_test; - int mpi_size = -1; - int mpi_rank = -1; - int test_num = 0; - int edge_size; - int checker_edge_size = 3; - int chunk_edge_size = 0; - int small_rank = 3; - int large_rank = 4; - int mpi_result; - hid_t dset_type = H5T_NATIVE_UINT; - int skips = 0; - int max_skips = 0; - /* The following table list the number of sub-tests skipped between - * each test that is actually executed as a function of the express - * test level. Note that any value in excess of 4880 will cause all - * sub tests to be skipped. - */ - int max_skips_tbl[4] = {0, 4, 64, 1024}; - int64_t total_tests = 0; - int64_t tests_run = 0; - int64_t tests_skipped = 0; - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - edge_size = (mpi_size > 6 ? mpi_size : 6); - - local_express_test = EXPRESS_MODE; /* GetTestExpress(); */ - - HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); - - mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX, - MPI_COMM_WORLD); - - VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded"); - - if (local_express_test < 0) { - max_skips = max_skips_tbl[0]; - } - else if (local_express_test > 3) { - max_skips = max_skips_tbl[3]; - } - else { - max_skips = max_skips_tbl[local_express_test]; - } - -#if 0 - { - int DebugWait = 1; - - while (DebugWait) ; - } -#endif - - for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) { - - for (small_rank = 2; small_rank < large_rank; small_rank++) { - switch (sstest_type) { - case IND_CONTIG: - /* contiguous data set, independent I/O */ - chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, false, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, - &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case IND_CONTIG */ - - case COL_CONTIG: - /* contiguous data set, collective I/O */ - chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, true, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, - &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case COL_CONTIG */ - - case IND_CHUNKED: - /* chunked data set, independent I/O */ - chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, false, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, - &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case IND_CHUNKED */ - - case COL_CHUNKED: - /* chunked data set, collective I/O */ - chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, - small_rank, large_rank, true, dset_type, express_test, - &skips, max_skips, &total_tests, &tests_run, - &tests_skipped, mpi_rank); - test_num++; - break; - /* end of case COL_CHUNKED */ - - default: - VRFY((false), "unknown test type"); - break; - - } /* end of switch(sstest_type) */ -#if CONTIG_HS_DR_PIO_TEST__DEBUG - if ((MAINPROCESS) && (tests_skipped > 0)) { - fprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n", tests_run, - tests_skipped, total_tests); - } -#endif /* 
CONTIG_HS_DR_PIO_TEST__DEBUG */ - } - } - - if (MAINPROCESS) { - if (tests_skipped > 0) { - fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", - tests_skipped, total_tests); - } - else - printf("\n"); - } - - return; - -} /* ckrbrd_hs_dr_pio_test() */ - -/* Main Body. Here for now, may have to move them to a separated file later. */ - -/* - * Main driver of the Parallel HDF5 tests - */ - -/* global variables */ -int dim0; -int dim1; -int chunkdim0; -int chunkdim1; -int nerrors = 0; /* errors count */ -int ndatasets = 300; /* number of datasets to create*/ -int ngroups = 512; /* number of groups to create in root - * group. */ -int facc_type = FACC_MPIO; /*Test file access type */ -int dxfer_coll_type = DXFER_COLLECTIVE_IO; - -H5E_auto2_t old_func; /* previous error handler */ -void *old_client_data; /* previous error handler arg.*/ - -/* other option flags */ - -#ifdef USE_PAUSE -/* pause the process for a moment to allow debugger to attach if desired. */ -/* Will pause more if greenlight file is not present but will eventually */ -/* continue. */ -#include -#include - -void -pause_proc(void) -{ - - int pid; - h5_stat_t statbuf; - char greenlight[] = "go"; - int maxloop = 10; - int loops = 0; - int time_int = 10; - - /* mpi variables */ - int mpi_size, mpi_rank; - int mpi_namelen; - char mpi_name[MPI_MAX_PROCESSOR_NAME]; - - pid = getpid(); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Get_processor_name(mpi_name, &mpi_namelen); - - if (MAINPROCESS) - while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) { - if (!loops++) { - printf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, pid); - } - printf("waiting(%ds) for file %s ...\n", time_int, greenlight); - fflush(stdout); - HDsleep(time_int); - } - MPI_Barrier(MPI_COMM_WORLD); -} - -/* Use the Profile feature of MPI to call the pause_proc() */ -int -MPI_Init(int *argc, char ***argv) -{ - int ret_code; - ret_code = PMPI_Init(argc, argv); - pause_proc(); - return (ret_code); -} -#endif /* USE_PAUSE */ - -/* - * Show command usage - */ -static void -usage(void) -{ - printf(" [-r] [-w] [-m] [-n] " - "[-o] [-f ] [-d ]\n"); - printf("\t-m" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n" - "\tset number of groups for the multiple group test\n"); -#if 0 - printf("\t-f \tfilename prefix\n"); -#endif - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, - COL_FACTOR); - printf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); -} - -/* - * parse the command line options - */ -static int -parse_options(int argc, char **argv) -{ - int mpi_size, mpi_rank; /* mpi variables */ - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* setup default chunk-size. 
Make sure sizes are > 0 */ - - chunkdim0 = (dim0 + 9) / 10; - chunkdim1 = (dim1 + 9) / 10; - - while (--argc) { - if (**(++argv) != '-') { - break; - } - else { - switch (*(*argv + 1)) { - case 'm': - ndatasets = atoi((*argv + 1) + 1); - if (ndatasets < 0) { - nerrors++; - return (1); - } - break; - case 'n': - ngroups = atoi((*argv + 1) + 1); - if (ngroups < 0) { - nerrors++; - return (1); - } - break; -#if 0 - case 'f': if (--argc < 1) { - nerrors++; - return(1); - } - if (**(++argv) == '-') { - nerrors++; - return(1); - } - paraprefix = *argv; - break; -#endif - case 'i': /* Collective MPI-IO access with independent IO */ - dxfer_coll_type = DXFER_INDEPENDENT_IO; - break; - case '2': /* Use the split-file driver with MPIO access */ - /* Can use $HDF5_METAPREFIX to define the */ - /* meta-file-prefix. */ - facc_type = FACC_MPIO | FACC_SPLIT; - break; - case 'd': /* dimensizes */ - if (--argc < 2) { - nerrors++; - return (1); - } - dim0 = atoi(*(++argv)) * mpi_size; - argc--; - dim1 = atoi(*(++argv)) * mpi_size; - /* set default chunkdim sizes too */ - chunkdim0 = (dim0 + 9) / 10; - chunkdim1 = (dim1 + 9) / 10; - break; - case 'c': /* chunk dimensions */ - if (--argc < 2) { - nerrors++; - return (1); - } - chunkdim0 = atoi(*(++argv)); - argc--; - chunkdim1 = atoi(*(++argv)); - break; - case 'h': /* print help message--return with nerrors set */ - return (1); - default: - printf("Illegal option(%s)\n", *argv); - nerrors++; - return (1); - } - } - } /*while*/ - - /* check validity of dimension and chunk sizes */ - if (dim0 <= 0 || dim1 <= 0) { - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); - nerrors++; - return (1); - } - if (chunkdim0 <= 0 || chunkdim1 <= 0) { - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); - nerrors++; - return (1); - } - - /* Make sure datasets can be divided into equal portions by the processes */ - if ((dim0 % mpi_size) || (dim1 % mpi_size)) { - if (MAINPROCESS) - printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); - nerrors++; - return (1); - } - - /* compose the test filenames */ - { - int i, n; - - n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - - for (i = 0; i < n; i++) - strncpy(filenames[i], FILENAME[i], PATH_MAX); -#if 0 /* no support for VFDs right now */ - if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) { - printf("h5_fixname failed\n"); - nerrors++; - return (1); - } -#endif - if (MAINPROCESS) { - printf("Test filenames are:\n"); - for (i = 0; i < n; i++) - printf(" %s\n", filenames[i]); - } - } - - return (0); -} - -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = -1; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY((ret >= 0), ""); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set 
Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - -/* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */ -static void -sscontig1(void) -{ - contig_hs_dr_pio_test(IND_CONTIG); -} - -/* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */ -static void -sscontig2(void) -{ - contig_hs_dr_pio_test(COL_CONTIG); -} - -/* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */ -static void -sscontig3(void) -{ - contig_hs_dr_pio_test(IND_CHUNKED); -} - -/* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */ -static void -sscontig4(void) -{ - contig_hs_dr_pio_test(COL_CHUNKED); -} - -/* Shape Same test using checker hyperslab using independent IO on contiguous datasets */ -static void -sschecker1(void) -{ - ckrbrd_hs_dr_pio_test(IND_CONTIG); -} - -/* Shape Same test using checker hyperslab using collective IO on contiguous datasets */ -static void -sschecker2(void) -{ - ckrbrd_hs_dr_pio_test(COL_CONTIG); -} - -/* Shape Same test using checker hyperslab using independent IO on chunked datasets */ -static void -sschecker3(void) -{ - ckrbrd_hs_dr_pio_test(IND_CHUNKED); -} - -/* Shape Same test using checker hyperslab using collective IO on chunked datasets */ -static void -sschecker4(void) -{ - ckrbrd_hs_dr_pio_test(COL_CHUNKED); -} - -int -main(int argc, char **argv) -{ - int mpi_size, mpi_rank; /* mpi variables */ - -#ifndef H5_HAVE_WIN32_API - /* Un-buffer the stdout and stderr */ - HDsetbuf(stderr, NULL); - HDsetbuf(stdout, NULL); -#endif - - MPI_Init(&argc, &argv); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - dim0 = ROW_FACTOR * mpi_size; - dim1 = COL_FACTOR * mpi_size; - - if (MAINPROCESS) { - printf("===================================\n"); - printf("Shape Same Tests Start\n"); - printf(" express_test = %d.\n", EXPRESS_MODE /* GetTestExpress() */); - printf("===================================\n"); - } - - /* Attempt to turn off atexit post processing so that in case errors - * happen during the test and the process is aborted, it will not get - * hung in the atexit post processing in which it may try to make MPI - * calls. By then, MPI calls may not work. - */ - if (H5dont_atexit() < 0) { - if (MAINPROCESS) - printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank); - }; - H5open(); - /* h5_show_hostname(); */ - - fapl = H5Pcreate(H5P_FILE_ACCESS); - - /* Get the capability flag of the VOL connector being used */ - if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) { - if (MAINPROCESS) - printf("Failed to get the capability flag of the VOL connector being used\n"); - - MPI_Finalize(); - return 0; - } - - /* Make sure the connector supports the API functions being tested. 
This test only - * uses a few API functions, such as H5Fcreate/close/delete, H5Dcreate/write/read/close, - */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) - printf("API functions for basic file and dataset aren't supported with this connector\n"); - - MPI_Finalize(); - return 0; - } - -#if 0 - memset(filenames, 0, sizeof(filenames)); - for (int i = 0; i < NFILENAME; i++) { - if (NULL == (filenames[i] = malloc(PATH_MAX))) { - printf("couldn't allocate filename array\n"); - MPI_Abort(MPI_COMM_WORLD, -1); - } - } -#endif - - /* Initialize testing framework */ - /* TestInit(argv[0], usage, parse_options); */ - - if (parse_options(argc, argv)) { - usage(); - return 1; - } - - if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { - printf("===================================\n" - " Using Independent I/O with file set view to replace collective I/O \n" - "===================================\n"); - } - - /* Shape Same tests using contiguous hyperslab */ -#if 0 - AddTest("sscontig1", sscontig1, NULL, - "Cntg hslab, ind IO, cntg dsets", filenames[0]); - AddTest("sscontig2", sscontig2, NULL, - "Cntg hslab, col IO, cntg dsets", filenames[0]); - AddTest("sscontig3", sscontig3, NULL, - "Cntg hslab, ind IO, chnk dsets", filenames[0]); - AddTest("sscontig4", sscontig4, NULL, - "Cntg hslab, col IO, chnk dsets", filenames[0]); -#endif - if (MAINPROCESS) { - printf("Cntg hslab, ind IO, cntg dsets\n"); - fflush(stdout); - } - sscontig1(); - if (MAINPROCESS) { - printf("Cntg hslab, col IO, cntg dsets\n"); - fflush(stdout); - } - sscontig2(); - if (MAINPROCESS) { - printf("Cntg hslab, ind IO, chnk dsets\n"); - fflush(stdout); - } - sscontig3(); - if (MAINPROCESS) { - printf("Cntg hslab, col IO, chnk dsets\n"); - fflush(stdout); - } - sscontig4(); - - /* Shape Same tests using checker board hyperslab */ -#if 0 - AddTest("sschecker1", sschecker1, NULL, - "Check hslab, ind IO, cntg dsets", filenames[0]); - AddTest("sschecker2", sschecker2, NULL, - "Check hslab, col IO, cntg dsets", filenames[0]); - AddTest("sschecker3", sschecker3, NULL, - "Check hslab, ind IO, chnk dsets", filenames[0]); - AddTest("sschecker4", sschecker4, NULL, - "Check hslab, col IO, chnk dsets", filenames[0]); -#endif - if (MAINPROCESS) { - printf("Check hslab, ind IO, cntg dsets\n"); - fflush(stdout); - } - sschecker1(); - if (MAINPROCESS) { - printf("Check hslab, col IO, cntg dsets\n"); - fflush(stdout); - } - sschecker2(); - if (MAINPROCESS) { - printf("Check hslab, ind IO, chnk dsets\n"); - fflush(stdout); - } - sschecker3(); - if (MAINPROCESS) { - printf("Check hslab, col IO, chnk dsets\n"); - fflush(stdout); - } - sschecker4(); - - /* Display testing information */ - /* TestInfo(argv[0]); */ - - /* setup file access property list */ - H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); - - /* Parse command line arguments */ - /* TestParseCmdLine(argc, argv); */ - - /* Perform requested testing */ - /* PerformTests(); */ - - /* make sure all processes are finished before final report, cleanup - * and exit. 
- */ - MPI_Barrier(MPI_COMM_WORLD); - - /* Display test summary, if requested */ - /* if (MAINPROCESS && GetTestSummary()) - TestSummary(); */ - - /* Clean up test files */ - /* h5_clean_files(FILENAME, fapl); */ - H5Fdelete(FILENAME[0], fapl); - H5Pclose(fapl); - - /* nerrors += GetTestNumErrs(); */ - - /* Gather errors from all processes */ - { - int temp; - MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - nerrors = temp; - } - - if (MAINPROCESS) { /* only process 0 reports */ - printf("===================================\n"); - if (nerrors) - printf("***Shape Same tests detected %d errors***\n", nerrors); - else - printf("Shape Same tests finished successfully\n"); - printf("===================================\n"); - } - -#if 0 - for (int i = 0; i < NFILENAME; i++) { - free(filenames[i]); - filenames[i] = NULL; - } -#endif - - /* close HDF5 library */ - H5close(); - - /* Release test infrastructure */ - /* TestShutdown(); */ - - MPI_Finalize(); - - /* cannot just return (nerrors) because exit code is limited to 1byte */ - return (nerrors != 0); -} diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c deleted file mode 100644 index e2f148c9e4b..00000000000 --- a/testpar/API/t_span_tree.c +++ /dev/null @@ -1,2588 +0,0 @@ - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - This program will test irregular hyperslab selections with collective write and read. - The way to test whether collective write and read works is to use independent IO - output to verify the collective output. - - 1) We will write two datasets with the same hyperslab selection settings; - one in independent mode, - one in collective mode, - 2) We will read two datasets with the same hyperslab selection settings, - 1. independent read to read independent output, - independent read to read collective output, - Compare the result, - If the result is the same, then collective write succeeds. - 2. collective read to read independent output, - independent read to read independent output, - Compare the result, - If the result is the same, then collective read succeeds. 
- - */ - -#include "hdf5.h" -#if 0 -#include "H5private.h" -#endif -#include "testphdf5.h" - -#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0 - -static void coll_write_test(int chunk_factor); -static void coll_read_test(void); - -/*------------------------------------------------------------------------- - * Function: coll_irregular_cont_write - * - * Purpose: Wrapper to test the collectively irregular hyperslab write in - * contiguous storage - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_irregular_cont_write(void) -{ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_write_test(0); -} - -/*------------------------------------------------------------------------- - * Function: coll_irregular_cont_read - * - * Purpose: Wrapper to test the collectively irregular hyperslab read in - * contiguous storage - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_irregular_cont_read(void) -{ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_read_test(); -} - -/*------------------------------------------------------------------------- - * Function: coll_irregular_simple_chunk_write - * - * Purpose: Wrapper to test the collectively irregular hyperslab write in - * chunk storage(1 chunk) - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_irregular_simple_chunk_write(void) -{ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_write_test(1); -} - -/*------------------------------------------------------------------------- - * Function: coll_irregular_simple_chunk_read - * - * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk - * storage(1 chunk) - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_irregular_simple_chunk_read(void) -{ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & 
H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_read_test(); -} - -/*------------------------------------------------------------------------- - * Function: coll_irregular_complex_chunk_write - * - * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk - * storage(4 chunks) - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_irregular_complex_chunk_write(void) -{ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_write_test(4); -} - -/*------------------------------------------------------------------------- - * Function: coll_irregular_complex_chunk_read - * - * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk - * storage(1 chunk) - * - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_irregular_complex_chunk_read(void) -{ - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || - !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file dataset, or dataset more aren't supported with this " - "connector\n"); - fflush(stdout); - } - - return; - } - - coll_read_test(); -} - -/*------------------------------------------------------------------------- - * Function: coll_write_test - * - * Purpose: To test the collectively irregular hyperslab write in chunk - * storage - * Input: number of chunks on each dimension - * if number is equal to 0, contiguous storage - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -void -coll_write_test(int chunk_factor) -{ - - const char *filename; - hid_t facc_plist, dxfer_plist, dcrt_plist; - hid_t file, datasetc, dataseti; /* File and dataset identifiers */ - hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */ - - hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */ - hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */ - hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we - * read selection from the dataset on the disk - */ - - hsize_t start[2]; /* Start of hyperslab */ - hsize_t stride[2]; /* Stride of hyperslab */ - hsize_t count[2]; /* Block count */ - hsize_t block[2]; /* Block sizes */ - hsize_t chunk_dims[2]; - - herr_t ret; - int i; - int fillvalue = 0; /* Fill value for the dataset */ - - int *matrix_out = NULL; - int *matrix_out1 = NULL; /* Buffer to read from the dataset */ - int *vector = NULL; - - int 
mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - /*set up MPI parameters */ - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); - - /* Obtain file name */ - filename = PARATESTFILE /* GetTestParameters() */; - - /* - * Buffers' initialization. - */ - - mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size); - mdim[0] = MSPACE_DIM1; - mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size); - fsdim[0] = FSPACE_DIM1; - fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size); - - vector = (int *)malloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size); - matrix_out = (int *)malloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size); - matrix_out1 = (int *)malloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size); - - memset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size); - vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1; - for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++) - vector[i] = (int)i; - - /* Grab file access property list */ - facc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((facc_plist >= 0), ""); - - /* - * Create a file. - */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist); - VRFY((file >= 0), "H5Fcreate succeeded"); - - /* - * Create property list for a dataset and set up fill values. - */ - dcrt_plist = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcrt_plist >= 0), ""); - - ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue); - VRFY((ret >= 0), "Fill value creation property list succeeded"); - - if (chunk_factor != 0) { - chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor; - chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor; - ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims); - VRFY((ret >= 0), "chunk creation property list succeeded"); - } - - /* - * - * Create dataspace for the first dataset in the disk. - * dim1 = 9 - * dim2 = 3600 - * - * - */ - fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL); - VRFY((fspaceid >= 0), "file dataspace created succeeded"); - - /* - * Create dataset in the file. Notice that creation - * property list dcrt_plist is used. 
- */ - datasetc = - H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); - VRFY((datasetc >= 0), "dataset created succeeded"); - - dataseti = - H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); - VRFY((dataseti >= 0), "dataset created succeeded"); - - /* The First selection for FILE - * - * block (3,2) - * stride(4,3) - * count (1,768/mpi_size) - * start (0,1+768*3*mpi_rank/mpi_size) - * - */ - - start[0] = FHSTART0; - start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1); - stride[0] = FHSTRIDE0; - stride[1] = FHSTRIDE1; - count[0] = FHCOUNT0; - count[1] = FHCOUNT1; - block[0] = FHBLOCK0; - block[1] = FHBLOCK1; - - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* The Second selection for FILE - * - * block (3,768) - * stride (1,1) - * count (1,1) - * start (4,768*mpi_rank/mpi_size) - * - */ - - start[0] = SHSTART0; - start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank); - stride[0] = SHSTRIDE0; - stride[1] = SHSTRIDE1; - count[0] = SHCOUNT0; - count[1] = SHCOUNT1; - block[0] = SHBLOCK0; - block[1] = SHBLOCK1; - - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Create dataspace for the first dataset in the memory - * dim1 = 27000 - * - */ - mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL); - VRFY((mspaceid1 >= 0), "memory dataspace created succeeded"); - - /* - * Memory space is 1-D, this is a good test to check - * whether a span-tree derived datatype needs to be built. - * block 1 - * stride 1 - * count 6912/mpi_size - * start 1 - * - */ - start[0] = MHSTART0; - stride[0] = MHSTRIDE0; - count[0] = MHCOUNT0; - block[0] = MHBLOCK0; - - ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* independent write */ - ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector); - VRFY((ret >= 0), "dataset independent write succeed"); - - dxfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxfer_plist >= 0), ""); - - ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "MPIO data transfer property list succeed"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* collective write */ - ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector); - VRFY((ret >= 0), "dataset collective write succeed"); - - ret = H5Sclose(mspaceid1); - VRFY((ret >= 0), ""); - - ret = H5Sclose(fspaceid); - VRFY((ret >= 0), ""); - - /* - * Close dataset. - */ - ret = H5Dclose(datasetc); - VRFY((ret >= 0), ""); - - ret = H5Dclose(dataseti); - VRFY((ret >= 0), ""); - - /* - * Close the file. - */ - ret = H5Fclose(file); - VRFY((ret >= 0), ""); - /* - * Close property list - */ - - ret = H5Pclose(facc_plist); - VRFY((ret >= 0), ""); - ret = H5Pclose(dxfer_plist); - VRFY((ret >= 0), ""); - ret = H5Pclose(dcrt_plist); - VRFY((ret >= 0), ""); - - /* - * Open the file. 
- */ - - /*** - - For testing collective hyperslab selection write - In this test, we are using independent read to check - the correctness of collective write compared with - independent write, - - In order to thoroughly test this feature, we choose - a different selection set for reading the data out. - - - ***/ - - /* Obtain file access property list with MPI-IO driver */ - facc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((facc_plist >= 0), ""); - - file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); - VRFY((file >= 0), "H5Fopen succeeded"); - - /* - * Open the dataset. - */ - datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT); - VRFY((datasetc >= 0), "H5Dopen2 succeeded"); - - dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT); - VRFY((dataseti >= 0), "H5Dopen2 succeeded"); - - /* - * Get dataspace of the open dataset. - */ - fspaceid = H5Dget_space(datasetc); - VRFY((fspaceid >= 0), "file dataspace obtained succeeded"); - - fspaceid1 = H5Dget_space(dataseti); - VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded"); - - /* The First selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (1,2+768*mpi_rank/mpi_size) - * - */ - start[0] = RFFHSTART0; - start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1); - block[0] = RFFHBLOCK0; - block[1] = RFFHBLOCK1; - stride[0] = RFFHSTRIDE0; - stride[1] = RFFHSTRIDE1; - count[0] = RFFHCOUNT0; - count[1] = RFFHCOUNT1; - - /* The first selection of the dataset generated by collective write */ - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* The first selection of the dataset generated by independent write */ - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* The Second selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,1536/mpi_size) - * start (2,4+1536*mpi_rank/mpi_size) - * - */ - - start[0] = RFSHSTART0; - start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank); - block[0] = RFSHBLOCK0; - block[1] = RFSHBLOCK1; - stride[0] = RFSHSTRIDE0; - stride[1] = RFSHSTRIDE0; - count[0] = RFSHCOUNT0; - count[1] = RFSHCOUNT1; - - /* The second selection of the dataset generated by collective write */ - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* The second selection of the dataset generated by independent write */ - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Create memory dataspace. - * rank = 2 - * mdim1 = 9 - * mdim2 = 3600 - * - */ - mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); - - /* - * Select two hyperslabs in memory. Hyperslabs have the same - * size and shape as the selected hyperslabs for the file dataspace - * Only the starting point is different. 
- * The first selection - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (0,768*mpi_rank/mpi_size) - * - */ - - start[0] = RMFHSTART0; - start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1); - block[0] = RMFHBLOCK0; - block[1] = RMFHBLOCK1; - stride[0] = RMFHSTRIDE0; - stride[1] = RMFHSTRIDE1; - count[0] = RMFHCOUNT0; - count[1] = RMFHCOUNT1; - - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Select two hyperslabs in memory. Hyperslabs has the same - * size and shape as the selected hyperslabs for the file dataspace - * Only the starting point is different. - * The second selection - * block (1,1) - * stride(1,1) - * count (3,1536/mpi_size) - * start (1,2+1536*mpi_rank/mpi_size) - * - */ - start[0] = RMSHSTART0; - start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1); - block[0] = RMSHBLOCK0; - block[1] = RMSHBLOCK1; - stride[0] = RMSHSTRIDE0; - stride[1] = RMSHSTRIDE1; - count[0] = RMSHCOUNT0; - count[1] = RMSHCOUNT1; - - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Initialize data buffer. - */ - - memset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); - memset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); - /* - * Read data back to the buffer matrix_out. - */ - - ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out); - VRFY((ret >= 0), "H5D independent read succeed"); - - ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1); - VRFY((ret >= 0), "H5D independent read succeed"); - - ret = 0; - - for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) { - if (matrix_out[i] != matrix_out1[i]) - ret = -1; - if (ret < 0) - break; - } - - VRFY((ret >= 0), "H5D irregular collective write succeed"); - - /* - * Close memory file and memory dataspaces. - */ - ret = H5Sclose(mspaceid); - VRFY((ret >= 0), ""); - ret = H5Sclose(fspaceid); - VRFY((ret >= 0), ""); - - /* - * Close dataset. - */ - ret = H5Dclose(dataseti); - VRFY((ret >= 0), ""); - - ret = H5Dclose(datasetc); - VRFY((ret >= 0), ""); - - /* - * Close property list - */ - - ret = H5Pclose(facc_plist); - VRFY((ret >= 0), ""); - - /* - * Close the file. 
- */ - ret = H5Fclose(file); - VRFY((ret >= 0), ""); - - if (vector) - free(vector); - if (matrix_out) - free(matrix_out); - if (matrix_out1) - free(matrix_out1); - - return; -} - -/*------------------------------------------------------------------------- - * Function: coll_read_test - * - * Purpose: To test the collectively irregular hyperslab read in chunk - * storage - * Input: number of chunks on each dimension - * if number is equal to 0, contiguous storage - * Return: Success: 0 - * - * Failure: -1 - * - *------------------------------------------------------------------------- - */ -static void -coll_read_test(void) -{ - - const char *filename; - hid_t facc_plist, dxfer_plist; - hid_t file, dataseti; /* File and dataset identifiers */ - hid_t mspaceid, fspaceid1; /* Dataspace identifiers */ - - /* Dimension sizes of the dataset (on disk) */ - hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we - * read selection from the dataset on the disk - */ - - hsize_t start[2]; /* Start of hyperslab */ - hsize_t stride[2]; /* Stride of hyperslab */ - hsize_t count[2]; /* Block count */ - hsize_t block[2]; /* Block sizes */ - herr_t ret; - - int i; - - int *matrix_out; - int *matrix_out1; /* Buffer to read from the dataset */ - - int mpi_size, mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - /*set up MPI parameters */ - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); - - /* Obtain file name */ - filename = PARATESTFILE /* GetTestParameters() */; - - /* Initialize the buffer */ - - mdim[0] = MSPACE_DIM1; - mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size); - matrix_out = (int *)malloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); - matrix_out1 = (int *)malloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); - - /*** For testing collective hyperslab selection read ***/ - - /* Obtain file access property list */ - facc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((facc_plist >= 0), ""); - - /* - * Open the file. - */ - file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); - VRFY((file >= 0), "H5Fopen succeeded"); - - /* - * Open the dataset. - */ - dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT); - VRFY((dataseti >= 0), "H5Dopen2 succeeded"); - - /* - * Get dataspace of the open dataset. 
- */ - fspaceid1 = H5Dget_space(dataseti); - VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded"); - - /* The First selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (1,2+768*mpi_rank/mpi_size) - * - */ - start[0] = RFFHSTART0; - start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1); - block[0] = RFFHBLOCK0; - block[1] = RFFHBLOCK1; - stride[0] = RFFHSTRIDE0; - stride[1] = RFFHSTRIDE1; - count[0] = RFFHCOUNT0; - count[1] = RFFHCOUNT1; - - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* The Second selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,1536/mpi_size) - * start (2,4+1536*mpi_rank/mpi_size) - * - */ - start[0] = RFSHSTART0; - start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank); - block[0] = RFSHBLOCK0; - block[1] = RFSHBLOCK1; - stride[0] = RFSHSTRIDE0; - stride[1] = RFSHSTRIDE0; - count[0] = RFSHCOUNT0; - count[1] = RFSHCOUNT1; - - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Create memory dataspace. - */ - mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); - - /* - * Select two hyperslabs in memory. Hyperslabs have the same - * size and shape as the selected hyperslabs for the file dataspace. - * Only the starting point is different. - * The first selection - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (0,768*mpi_rank/mpi_size) - * - */ - - start[0] = RMFHSTART0; - start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1); - block[0] = RMFHBLOCK0; - block[1] = RMFHBLOCK1; - stride[0] = RMFHSTRIDE0; - stride[1] = RMFHSTRIDE1; - count[0] = RMFHCOUNT0; - count[1] = RMFHCOUNT1; - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Select two hyperslabs in memory. Hyperslabs has the same - * size and shape as the selected hyperslabs for the file dataspace - * Only the starting point is different. - * The second selection - * block (1,1) - * stride(1,1) - * count (3,1536/mpi_size) - * start (1,2+1536*mpi_rank/mpi_size) - * - */ - start[0] = RMSHSTART0; - start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1); - block[0] = RMSHBLOCK0; - block[1] = RMSHBLOCK1; - stride[0] = RMSHSTRIDE0; - stride[1] = RMSHSTRIDE1; - count[0] = RMSHCOUNT0; - count[1] = RMSHCOUNT1; - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "hyperslab selection succeeded"); - - /* - * Initialize data buffer. - */ - - memset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); - memset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); - - /* - * Read data back to the buffer matrix_out. 
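The irregular selections above are assembled by seeding the dataspace with one hyperslab (H5S_SELECT_SET) and then OR-ing a second, shifted hyperslab into it. A minimal sketch of that idiom, with made-up dimensions in place of the RFF*/RMS* constants used by the test:

#include "hdf5.h"

/* Sketch: union of two 2-D hyperslabs in a single dataspace (SET, then OR). */
static hid_t
make_union_selection(void)
{
    hsize_t dims[2]   = {8, 12};
    hsize_t start[2]  = {0, 0};
    hsize_t stride[2] = {1, 1};
    hsize_t count[2]  = {3, 4};
    hsize_t block[2]  = {1, 1};
    hid_t   sid       = H5Screate_simple(2, dims, NULL);

    /* the first hyperslab replaces whatever was selected before */
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    /* the second, shifted hyperslab is unioned into the selection */
    start[0] = 1;
    start[1] = 2;
    H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);

    return sid; /* caller closes with H5Sclose() */
}

The test builds the same union twice, once on the file dataspace and once on the memory dataspace, so that the two selections describe the same number of elements.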
- */ - - dxfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxfer_plist >= 0), ""); - - ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "MPIO data transfer property list succeed"); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "set independent IO collectively succeeded"); - } - - /* Collective read */ - ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out); - VRFY((ret >= 0), "H5D collecive read succeed"); - - ret = H5Pclose(dxfer_plist); - VRFY((ret >= 0), ""); - - /* Independent read */ - ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1); - VRFY((ret >= 0), "H5D independent read succeed"); - - ret = 0; - for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) { - if (matrix_out[i] != matrix_out1[i]) - ret = -1; - if (ret < 0) - break; - } - VRFY((ret >= 0), "H5D contiguous irregular collective read succeed"); - - /* - * Free read buffers. - */ - free(matrix_out); - free(matrix_out1); - - /* - * Close memory file and memory dataspaces. - */ - ret = H5Sclose(mspaceid); - VRFY((ret >= 0), ""); - ret = H5Sclose(fspaceid1); - VRFY((ret >= 0), ""); - - /* - * Close dataset. - */ - ret = H5Dclose(dataseti); - VRFY((ret >= 0), ""); - - /* - * Close property list - */ - ret = H5Pclose(facc_plist); - VRFY((ret >= 0), ""); - - /* - * Close the file. - */ - ret = H5Fclose(file); - VRFY((ret >= 0), ""); - - return; -} - -/**************************************************************** -** -** lower_dim_size_comp_test__select_checker_board(): -** -** Given a dataspace of tgt_rank, and dimensions: -** -** (mpi_size + 1), edge_size, ... , edge_size -** -** edge_size, and a checker_edge_size, select a checker -** board selection of a sel_rank (sel_rank < tgt_rank) -** dimensional slice through the dataspace parallel to the -** sel_rank fastest changing indices, with origin (in the -** higher indices) as indicated by the start array. -** -** Note that this function is hard-coded to presume a -** maximum dataspace rank of 5. -** -** While this maximum is declared as a constant, increasing -** it will require extensive coding in addition to changing -** the value of the constant. 
-** -** JRM -- 11/11/09 -** -****************************************************************/ - -#define LDSCT_DS_RANK 5 -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG -#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0 -#endif - -#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0 - -static void -lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank, - const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size, - const int sel_rank, hsize_t sel_start[]) -{ -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - const char *fcnName = "lower_dim_size_comp_test__select_checker_board():"; -#endif - bool first_selection = true; - int i, j, k, l, m; - int ds_offset; - int sel_offset; - const int test_max_rank = LDSCT_DS_RANK; /* must update code if */ - /* this changes */ - hsize_t base_count; - hsize_t offset_count; - hsize_t start[LDSCT_DS_RANK]; - hsize_t stride[LDSCT_DS_RANK]; - hsize_t count[LDSCT_DS_RANK]; - hsize_t block[LDSCT_DS_RANK]; - herr_t ret; /* Generic return value */ - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank, - (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - assert(0 < checker_edge_size); - assert(0 < sel_rank); - assert(sel_rank <= tgt_rank); - assert(tgt_rank <= test_max_rank); - assert(test_max_rank <= LDSCT_DS_RANK); - - sel_offset = test_max_rank - sel_rank; - assert(sel_offset >= 0); - - ds_offset = test_max_rank - tgt_rank; - assert(ds_offset >= 0); - assert(ds_offset <= sel_offset); - - assert((hsize_t)checker_edge_size <= dims[sel_offset]); - assert(dims[sel_offset] == 10); - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset); - fprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - /* First, compute the base count (which assumes start == 0 - * for the associated offset) and offset_count (which - * assumes start == checker_edge_size for the associated - * offset). - * - * Note that the following computation depends on the C99 - * requirement that integer division discard any fraction - * (truncation towards zero) to function correctly. As we - * now require C99, this shouldn't be a problem, but note - * it may save us some pain if we are ever obliged to support - * pre-C99 compilers again. 
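To make the truncation note concrete: with the edge size of 10 asserted above and a checker_edge_size of 3 (the value the callers later in this file pass), the two counts work out as in this small standalone example (plain C arithmetic, no HDF5 involved):

#include <stdio.h>

int
main(void)
{
    unsigned edge = 10, checker = 3;

    /* checkers whose runs start at offset 0: 10 / 6 == 1, remainder 4, so 2 */
    unsigned base_count = edge / (2 * checker);
    if (edge % (2 * checker) > 0)
        base_count++;

    /* checkers whose runs start at offset checker: 7 / 6 == 1, remainder 1, so 2 */
    unsigned offset_count = (edge - checker) / (2 * checker);
    if ((edge - checker) % (2 * checker) > 0)
        offset_count++;

    printf("base_count = %u, offset_count = %u\n", base_count, offset_count); /* 2, 2 */
    return 0;
}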
- */ - - base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2); - - if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) { - - base_count++; - } - - offset_count = - (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2))); - - if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) { - - offset_count++; - } - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, base_count, - offset_count); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - /* Now set up the stride and block arrays, and portions of the start - * and count arrays that will not be altered during the selection of - * the checkerboard. - */ - i = 0; - while (i < ds_offset) { - - /* these values should never be used */ - start[i] = 0; - stride[i] = 0; - count[i] = 0; - block[i] = 0; - - i++; - } - - while (i < sel_offset) { - - start[i] = sel_start[i]; - stride[i] = 2 * dims[i]; - count[i] = 1; - block[i] = 1; - - i++; - } - - while (i < test_max_rank) { - - stride[i] = (hsize_t)(2 * checker_edge_size); - block[i] = (hsize_t)checker_edge_size; - - i++; - } - - i = 0; - do { - if (0 >= sel_offset) { - - if (i == 0) { - - start[0] = 0; - count[0] = base_count; - } - else { - - start[0] = (hsize_t)checker_edge_size; - count[0] = offset_count; - } - } - - j = 0; - do { - if (1 >= sel_offset) { - - if (j == 0) { - - start[1] = 0; - count[1] = base_count; - } - else { - - start[1] = (hsize_t)checker_edge_size; - count[1] = offset_count; - } - } - - k = 0; - do { - if (2 >= sel_offset) { - - if (k == 0) { - - start[2] = 0; - count[2] = base_count; - } - else { - - start[2] = (hsize_t)checker_edge_size; - count[2] = offset_count; - } - } - - l = 0; - do { - if (3 >= sel_offset) { - - if (l == 0) { - - start[3] = 0; - count[3] = base_count; - } - else { - - start[3] = (hsize_t)checker_edge_size; - count[3] = offset_count; - } - } - - m = 0; - do { - if (4 >= sel_offset) { - - if (m == 0) { - - start[4] = 0; - count[4] = base_count; - } - else { - - start[4] = (hsize_t)checker_edge_size; - count[4] = offset_count; - } - } - - if (((i + j + k + l + m) % 2) == 0) { - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - - fprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank, - (int)first_selection); - fprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, - j, k, l, m); - fprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)start[0], (int)start[1], (int)start[2], (int)start[3], - (int)start[4]); - fprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3], - (int)stride[4]); - fprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)count[0], (int)count[1], (int)count[2], (int)count[3], - (int)count[4]); - fprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank, - (int)block[0], (int)block[1], (int)block[2], (int)block[3], - (int)block[4]); - fprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank, - H5Sget_simple_extent_ndims(tgt_sid)); - fprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank); - } -#endif - - if (first_selection) { - - first_selection = false; - - ret = 
H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]), - &(stride[ds_offset]), &(count[ds_offset]), - &(block[ds_offset])); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded"); - } - else { - - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]), - &(stride[ds_offset]), &(count[ds_offset]), - &(block[ds_offset])); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); - } - } - - m++; - - } while ((m <= 1) && (4 >= sel_offset)); - - l++; - - } while ((l <= 1) && (3 >= sel_offset)); - - k++; - - } while ((k <= 1) && (2 >= sel_offset)); - - j++; - - } while ((j <= 1) && (1 >= sel_offset)); - - i++; - - } while ((i <= 1) && (0 >= sel_offset)); - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(tgt_sid)); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - /* Clip the selection back to the dataspace proper. */ - - for (i = 0; i < test_max_rank; i++) { - - start[i] = 0; - stride[i] = dims[i]; - count[i] = 1; - block[i] = dims[i]; - } - - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(tgt_sid)); - fprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - - return; - -} /* lower_dim_size_comp_test__select_checker_board() */ - -/**************************************************************** -** -** lower_dim_size_comp_test__verify_data(): -** -** Examine the supplied buffer to see if it contains the -** expected data. Return true if it does, and false -** otherwise. -** -** The supplied buffer is presumed to be this process's slice -** of the target data set. Each such slice will be an -** n-cube of rank (rank -1) and the supplied edge_size with -** origin (mpi_rank, 0, ... , 0) in the target data set. -** -** Further, the buffer is presumed to be the result of reading -** or writing a checkerboard selection of an m (1 <= m < -** rank) dimensional slice through this processes slice -** of the target data set. Also, this slice must be parallel -** to the fastest changing indices. -** -** It is further presumed that the buffer was zeroed before -** the read/write, and that the full target data set (i.e. -** the buffer/data set for all processes) was initialized -** with the natural numbers listed in order from the origin -** along the fastest changing axis. -** -** Thus, for a 20x10x10 dataset, the value stored in location -** (x, y, z) (assuming that z is the fastest changing index -** and x the slowest) is assumed to be: -** -** (10 * 10 * x) + (10 * y) + z -** -** Further, supposing that this is process 10, this process's -** slice of the dataset would be a 10 x 10 2-cube with origin -** (10, 0, 0) in the data set, and would be initialized (prior -** to the checkerboard selection) as follows: -** -** 1000, 1001, 1002, ... 1008, 1009 -** 1010, 1011, 1012, ... 1018, 1019 -** . . . . . -** . . . . . -** . . . . . -** 1090, 1091, 1092, ... 
1098, 1099 -** -** In the case of a read from the processors slice of another -** data set of different rank, the values expected will have -** to be adjusted accordingly. This is done via the -** first_expected_val parameter. -** -** Finally, the function presumes that the first element -** of the buffer resides either at the origin of either -** a selected or an unselected checker. (Translation: -** if partial checkers appear in the buffer, they will -** intersect the edges of the n-cube opposite the origin.) -** -****************************************************************/ - -#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0 - -static bool -lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr, -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - const int mpi_rank, -#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ - const int rank, const int edge_size, const int checker_edge_size, - uint32_t first_expected_val, bool buf_starts_in_checker) -{ -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - const char *fcnName = "lower_dim_size_comp_test__verify_data():"; -#endif - bool good_data = true; - bool in_checker; - bool start_in_checker[5]; - uint32_t expected_value; - uint32_t *val_ptr; - int i, j, k, l, m; /* to track position in n-cube */ - int v, w, x, y, z; /* to track position in checker */ - const int test_max_rank = 5; /* code changes needed if this is increased */ - - assert(buf_ptr != NULL); - assert(0 < rank); - assert(rank <= test_max_rank); - assert(edge_size >= 6); - assert(0 < checker_edge_size); - assert(checker_edge_size <= edge_size); - assert(test_max_rank <= LDSCT_DS_RANK); - -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank); - fprintf(stdout, "%s rank = %d.\n", fcnName, rank); - fprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size); - fprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size); - fprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val); - fprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker); - } -#endif - - val_ptr = buf_ptr; - expected_value = first_expected_val; - - i = 0; - v = 0; - start_in_checker[0] = buf_starts_in_checker; - do { - if (v >= checker_edge_size) { - - start_in_checker[0] = !start_in_checker[0]; - v = 0; - } - - j = 0; - w = 0; - start_in_checker[1] = start_in_checker[0]; - do { - if (w >= checker_edge_size) { - - start_in_checker[1] = !start_in_checker[1]; - w = 0; - } - - k = 0; - x = 0; - start_in_checker[2] = start_in_checker[1]; - do { - if (x >= checker_edge_size) { - - start_in_checker[2] = !start_in_checker[2]; - x = 0; - } - - l = 0; - y = 0; - start_in_checker[3] = start_in_checker[2]; - do { - if (y >= checker_edge_size) { - - start_in_checker[3] = !start_in_checker[3]; - y = 0; - } - - m = 0; - z = 0; -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); - } -#endif - in_checker = start_in_checker[3]; - do { -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, " %d", (int)(*val_ptr)); - } -#endif - if (z >= checker_edge_size) { - - in_checker = !in_checker; - z = 0; - } - - if (in_checker) { - - if (*val_ptr != expected_value) { - - good_data = false; - } - - /* zero out buffer for reuse */ - *val_ptr = 0; - } 
- else if (*val_ptr != 0) { - - good_data = false; - - /* zero out buffer for reuse */ - *val_ptr = 0; - } - - val_ptr++; - expected_value++; - m++; - z++; - - } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "\n"); - } -#endif - l++; - y++; - } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); - k++; - x++; - } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); - j++; - w++; - } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); - i++; - v++; - } while ((rank >= test_max_rank) && (i < edge_size)); - - return (good_data); - -} /* lower_dim_size_comp_test__verify_data() */ - -/*------------------------------------------------------------------------- - * Function: lower_dim_size_comp_test__run_test() - * - * Purpose: Verify that a bug in the computation of the size of the - * lower dimensions of a dataspace in H5S_obtain_datatype() - * has been corrected. - * - * Return: void - *------------------------------------------------------------------------- - */ - -#define LDSCT_DS_RANK 5 - -static void -lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_collective_io, - const hid_t dset_type) -{ -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - const char *fcnName = "lower_dim_size_comp_test__run_test()"; - int rank; - hsize_t dims[32]; - hsize_t max_dims[32]; -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - const char *filename; - bool data_ok = false; - bool mis_match = false; - int i; - int start_index; - int stop_index; - int mrc; - int mpi_rank; - int mpi_size; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist = H5P_DEFAULT; - size_t small_ds_size; - size_t small_ds_slice_size; - size_t large_ds_size; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - size_t large_ds_slice_size; -#endif - uint32_t expected_value; - uint32_t *small_ds_buf_0 = NULL; - uint32_t *small_ds_buf_1 = NULL; - uint32_t *large_ds_buf_0 = NULL; - uint32_t *large_ds_buf_1 = NULL; - uint32_t *ptr_0; - uint32_t *ptr_1; - hsize_t small_chunk_dims[LDSCT_DS_RANK]; - hsize_t large_chunk_dims[LDSCT_DS_RANK]; - hsize_t small_dims[LDSCT_DS_RANK]; - hsize_t large_dims[LDSCT_DS_RANK]; - hsize_t start[LDSCT_DS_RANK]; - hsize_t stride[LDSCT_DS_RANK]; - hsize_t count[LDSCT_DS_RANK]; - hsize_t block[LDSCT_DS_RANK]; - hsize_t small_sel_start[LDSCT_DS_RANK]; - hsize_t large_sel_start[LDSCT_DS_RANK]; - hid_t full_mem_small_ds_sid; - hid_t full_file_small_ds_sid; - hid_t mem_small_ds_sid; - hid_t file_small_ds_sid; - hid_t full_mem_large_ds_sid; - hid_t full_file_large_ds_sid; - hid_t mem_large_ds_sid; - hid_t file_large_ds_sid; - hid_t small_ds_dcpl_id = H5P_DEFAULT; - hid_t large_ds_dcpl_id = H5P_DEFAULT; - hid_t small_dataset; /* Dataset ID */ - hid_t large_dataset; /* Dataset ID */ - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - assert(mpi_size >= 1); - - mpi_comm = MPI_COMM_WORLD; - mpi_info = MPI_INFO_NULL; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, mpi_rank, (int)chunk_edge_size); - fprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, 
(int)use_collective_io); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10); - small_ds_slice_size = (size_t)(1 * 1 * 10 * 10); - large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - large_ds_slice_size = (size_t)(10 * 10 * 10 * 10); - - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank, - (int)small_ds_size, (int)small_ds_slice_size); - fprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank, - (int)large_ds_size, (int)large_ds_slice_size); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - /* Allocate buffers */ - small_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * small_ds_size); - VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded"); - - small_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * small_ds_size); - VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded"); - - large_ds_buf_0 = (uint32_t *)malloc(sizeof(uint32_t) * large_ds_size); - VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded"); - - large_ds_buf_1 = (uint32_t *)malloc(sizeof(uint32_t) * large_ds_size); - VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded"); - - /* initialize the buffers */ - - ptr_0 = small_ds_buf_0; - ptr_1 = small_ds_buf_1; - - for (i = 0; i < (int)small_ds_size; i++) { - - *ptr_0 = (uint32_t)i; - *ptr_1 = 0; - - ptr_0++; - ptr_1++; - } - - ptr_0 = large_ds_buf_0; - ptr_1 = large_ds_buf_1; - - for (i = 0; i < (int)large_ds_size; i++) { - - *ptr_0 = (uint32_t)i; - *ptr_1 = 0; - - ptr_0++; - ptr_1++; - } - - /* get the file name */ - - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - assert(filename != NULL); - - /* ---------------------------------------- - * CREATE AN HDF5 FILE WITH PARALLEL ACCESS - * ---------------------------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded"); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - MESG("File opened."); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); - - /* setup dims: */ - small_dims[0] = (hsize_t)(mpi_size + 1); - small_dims[1] = 1; - small_dims[2] = 1; - small_dims[3] = 10; - small_dims[4] = 10; - - large_dims[0] = (hsize_t)(mpi_size + 1); - large_dims[1] = 10; - large_dims[2] = 10; - large_dims[3] = 10; - large_dims[4] = 10; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0], - (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]); - fprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0], - (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]); - } -#endif - - /* create dataspaces */ - - full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded"); - - full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((full_file_small_ds_sid != 0), "H5Screate_simple() 
full_file_small_ds_sid succeeded"); - - mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded"); - - file_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((file_small_ds_sid != 0), "H5Screate_simple() file_small_ds_sid succeeded"); - - full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded"); - - full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((full_file_large_ds_sid != 0), "H5Screate_simple() full_file_large_ds_sid succeeded"); - - mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((mem_large_ds_sid != 0), "H5Screate_simple() mem_large_ds_sid succeeded"); - - file_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((file_large_ds_sid != 0), "H5Screate_simple() file_large_ds_sid succeeded"); - - /* Select the entire extent of the full small ds dataspaces */ - ret = H5Sselect_all(full_mem_small_ds_sid); - VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded"); - - ret = H5Sselect_all(full_file_small_ds_sid); - VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded"); - - /* Select the entire extent of the full large ds dataspaces */ - ret = H5Sselect_all(full_mem_large_ds_sid); - VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded"); - - ret = H5Sselect_all(full_file_large_ds_sid); - VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded"); - - /* if chunk edge size is greater than zero, set up the small and - * large data set creation property lists to specify chunked - * datasets. - */ - if (chunk_edge_size > 0) { - - small_chunk_dims[0] = (hsize_t)(1); - small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1; - small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, - (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2], - (int)small_chunk_dims[3], (int)small_chunk_dims[4]); - } -#endif - - small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded"); - - ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED); - VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded"); - - ret = H5Pset_chunk(small_ds_dcpl_id, 5, small_chunk_dims); - VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); - - large_chunk_dims[0] = (hsize_t)(1); - large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] = - (hsize_t)chunk_edge_size; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, - (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2], - (int)large_chunk_dims[3], (int)large_chunk_dims[4]); - } -#endif - - large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded"); - - ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED); - VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded"); - - ret = H5Pset_chunk(large_ds_dcpl_id, 5, large_chunk_dims); - VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded"); - } - - /* create the small dataset */ - 
small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT, - small_ds_dcpl_id, H5P_DEFAULT); - VRFY((ret >= 0), "H5Dcreate2() small_dataset succeeded"); - - /* create the large dataset */ - large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT, - large_ds_dcpl_id, H5P_DEFAULT); - VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset, - (int)large_dataset); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - /* setup xfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - if (!use_collective_io) { - - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded"); - } - - /* setup selection to write initial data to the small data sets */ - start[0] = (hsize_t)(mpi_rank + 1); - start[1] = start[2] = start[3] = start[4] = 0; - - stride[0] = (hsize_t)(2 * (mpi_size + 1)); - stride[1] = stride[2] = 2; - stride[3] = stride[4] = 2 * 10; - - count[0] = count[1] = count[2] = count[3] = count[4] = 1; - - block[0] = block[1] = block[2] = 1; - block[3] = block[4] = 10; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: settings for small data set initialization.\n", fcnName, mpi_rank); - fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], - (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); - fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], (int)block[1], - (int)block[2], (int)block[3], (int)block[4]); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - /* setup selections for writing initial data to the small data set */ - ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); - - ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded"); - - if (MAINPROCESS) { /* add an additional slice to the selections */ - - start[0] = 0; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank); - fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], - (int)start[1], (int)start[2], (int)start[3], (int)start[4]); - fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], - (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); - fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], - (int)count[1], (int)count[2], 
(int)count[3], (int)count[4]); - fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], - (int)block[1], (int)block[2], (int)block[3], (int)block[4]); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded"); - - ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded"); - } - - check = H5Sselect_valid(mem_small_ds_sid); - VRFY((check == true), "H5Sselect_valid(mem_small_ds_sid) returns true"); - - check = H5Sselect_valid(file_small_ds_sid); - VRFY((check == true), "H5Sselect_valid(file_small_ds_sid) returns true"); - - /* write the initial value of the small data set to file */ -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0); - VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); - - /* sync with the other processes before reading data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); - - /* read the small data set back to verify that it contains the - * expected data. Note that each process reads in the entire - * data set and verifies it. - */ - ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist, - small_ds_buf_1); - VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded"); - - /* sync with the other processes before checking data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); - - /* verify that the correct data was written to the small data set, - * and reset the buffer to zero in passing. 
- */ - expected_value = 0; - mis_match = false; - ptr_1 = small_ds_buf_1; - - i = 0; - for (i = 0; i < (int)small_ds_size; i++) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - - *ptr_1 = (uint32_t)0; - - ptr_1++; - expected_value++; - } - VRFY((mis_match == false), "small ds init data good."); - - /* setup selections for writing initial data to the large data set */ - start[0] = (hsize_t)(mpi_rank + 1); - start[1] = start[2] = start[3] = start[4] = (hsize_t)0; - - stride[0] = (hsize_t)(2 * (mpi_size + 1)); - stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10); - - count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1; - - block[0] = (hsize_t)1; - block[1] = block[2] = block[3] = block[4] = (hsize_t)10; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: settings for large data set initialization.\n", fcnName, mpi_rank); - fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], - (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); - fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], (int)block[1], - (int)block[2], (int)block[3], (int)block[4]); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded"); - - ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(mem_large_ds_sid)); - fprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(file_large_ds_sid)); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - if (MAINPROCESS) { /* add an additional slice to the selections */ - - start[0] = (hsize_t)0; - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank); - fprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], - (int)start[1], (int)start[2], (int)start[3], (int)start[4]); - fprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], - (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); - fprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], - (int)count[1], (int)count[2], (int)count[3], (int)count[4]); - fprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], - (int)block[1], (int)block[2], (int)block[3], (int)block[4]); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), 
"H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded"); - - ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(mem_large_ds_sid)); - fprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank, - (int)H5Sget_select_npoints(file_large_ds_sid)); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - } - - /* try clipping the selection back to the large dataspace proper */ - start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0; - - stride[0] = (hsize_t)(2 * (mpi_size + 1)); - stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10); - - count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1; - - block[0] = (hsize_t)(mpi_size + 1); - block[1] = block[2] = block[3] = block[4] = (hsize_t)10; - - ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block); - VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded"); - - ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block); - VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - - rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims); - fprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank, - (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]); - - rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims); - fprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank, - (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - check = H5Sselect_valid(mem_large_ds_sid); - VRFY((check == true), "H5Sselect_valid(mem_large_ds_sid) returns true"); - - check = H5Sselect_valid(file_large_ds_sid); - VRFY((check == true), "H5Sselect_valid(file_large_ds_sid) returns true"); - - /* write the initial value of the large data set to file */ -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank); - fprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset); - fprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank, - (int)mem_large_ds_sid, (int)file_large_ds_sid); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0); - - if (ret < 0) - H5Eprint2(H5E_DEFAULT, stderr); - VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); - - /* sync with the other processes before checking data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes"); - - /* read the large data set back to verify that it contains the - * expected data. Note that each process reads in the entire - * data set. 
- */ - ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist, - large_ds_buf_1); - VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded"); - - /* verify that the correct data was written to the large data set. - * in passing, reset the buffer to zeros - */ - expected_value = 0; - mis_match = false; - ptr_1 = large_ds_buf_1; - - i = 0; - for (i = 0; i < (int)large_ds_size; i++) { - - if (*ptr_1 != expected_value) { - - mis_match = true; - } - - *ptr_1 = (uint32_t)0; - - ptr_1++; - expected_value++; - } - VRFY((mis_match == false), "large ds init data good."); - - /***********************************/ - /***** INITIALIZATION COMPLETE *****/ - /***********************************/ - - /* read a checkerboard selection of the process slice of the - * small on disk data set into the process slice of the large - * in memory data set, and verify the data read. - */ - - small_sel_start[0] = (hsize_t)(mpi_rank + 1); - small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0; - - lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid, - /* tgt_rank = */ 5, small_dims, - /* checker_edge_size = */ 3, - /* sel_rank */ 2, small_sel_start); - - expected_value = - (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) + - (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) + - (small_sel_start[2] * small_dims[3] * small_dims[4]) + - (small_sel_start[3] * small_dims[4]) + (small_sel_start[4])); - - large_sel_start[0] = (hsize_t)(mpi_rank + 1); - large_sel_start[1] = 5; - large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0; - - lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid, - /* tgt_rank = */ 5, large_dims, - /* checker_edge_size = */ 3, - /* sel_rank = */ 2, large_sel_start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. 
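The point of the two checkerboard selections above is that a 2-D slice carved out of the 5-D small file dataspace and a 2-D slice carved out of the 5-D large memory dataspace have the same shape even though the surrounding extents differ, so a single H5Dread() can pair them. A stripped-down sketch of that shape check, using lower ranks and made-up sizes (shapes_match_example() is illustrative only):

#include "hdf5.h"

/* Sketch: same-shaped selections in dataspaces of different rank and extent. */
static htri_t
shapes_match_example(void)
{
    hsize_t dims2[2]  = {10, 10};
    hsize_t dims3[3]  = {4, 10, 10};
    hsize_t start2[2] = {0, 0}, count2[2] = {10, 10};
    hsize_t start3[3] = {2, 0, 0}, count3[3] = {1, 10, 10};
    hid_t   sid2      = H5Screate_simple(2, dims2, NULL);
    hid_t   sid3      = H5Screate_simple(3, dims3, NULL);
    htri_t  same;

    /* a full 10x10 plane in each dataspace; NULL stride/block default to 1 */
    H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start2, NULL, count2, NULL);
    H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start3, NULL, count3, NULL);

    /* expected > 0: the selections differ only by a leading size-1 dimension */
    same = H5Sselect_shape_same(sid2, sid3);

    H5Sclose(sid2);
    H5Sclose(sid3);
    return same;
}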
- */ - check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid); - VRFY((check == true), "H5Sselect_shape_same passed (1)"); - - ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist, - large_ds_buf_1); - - VRFY((ret >= 0), "H5Sread() slice from small ds succeeded."); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - /* verify that expected data is retrieved */ - - data_ok = true; - - start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[3] * large_dims[4]) + (large_sel_start[4])); - - stop_index = start_index + (int)small_ds_slice_size; - - assert(0 <= start_index); - assert(start_index < stop_index); - assert(stop_index <= (int)large_ds_size); - - ptr_1 = large_ds_buf_1; - - for (i = 0; i < start_index; i++) { - - if (*ptr_1 != (uint32_t)0) { - - data_ok = false; - *ptr_1 = (uint32_t)0; - } - - ptr_1++; - } - - VRFY((data_ok == true), "slice read from small ds data good(1)."); - - data_ok = lower_dim_size_comp_test__verify_data(ptr_1, -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - mpi_rank, -#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ - /* rank */ 2, - /* edge_size */ 10, - /* checker_edge_size */ 3, expected_value, - /* buf_starts_in_checker */ true); - - VRFY((data_ok == true), "slice read from small ds data good(2)."); - - data_ok = true; - - ptr_1 += small_ds_slice_size; - - for (i = stop_index; i < (int)large_ds_size; i++) { - - if (*ptr_1 != (uint32_t)0) { - - data_ok = false; - *ptr_1 = (uint32_t)0; - } - - ptr_1++; - } - - VRFY((data_ok == true), "slice read from small ds data good(3)."); - - /* read a checkerboard selection of a slice of the process slice of - * the large on disk data set into the process slice of the small - * in memory data set, and verify the data read. - */ - - small_sel_start[0] = (hsize_t)(mpi_rank + 1); - small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0; - - lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid, - /* tgt_rank = */ 5, small_dims, - /* checker_edge_size = */ 3, - /* sel_rank */ 2, small_sel_start); - - large_sel_start[0] = (hsize_t)(mpi_rank + 1); - large_sel_start[1] = 5; - large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0; - - lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid, - /* tgt_rank = */ 5, large_dims, - /* checker_edge_size = */ 3, - /* sel_rank = */ 2, large_sel_start); - - /* verify that H5Sselect_shape_same() reports the two - * selections as having the same shape. 
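The expected_value and start_index expressions above are just the row-major linearization of a 5-D coordinate against the corresponding dims array. Worked through for one concrete case (plain C; assumes mpi_size == 3, so large dims of (4, 10, 10, 10, 10), and rank 1's selection start of (2, 5, 0, 0, 0)):

#include <stdio.h>

/* Sketch: row-major linear offset of a coordinate, the same arithmetic as the
 * expected_value / start_index computations above. */
static unsigned long
linear_offset(int rank, const unsigned long dims[], const unsigned long coord[])
{
    unsigned long offset = 0;

    for (int i = 0; i < rank; i++)
        offset = offset * dims[i] + coord[i]; /* Horner form of the nested products */

    return offset;
}

int
main(void)
{
    unsigned long dims[5]  = {4, 10, 10, 10, 10};
    unsigned long coord[5] = {2, 5, 0, 0, 0};

    printf("%lu\n", linear_offset(5, dims, coord)); /* prints 25000 */
    return 0;
}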
- */ - check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid); - VRFY((check == true), "H5Sselect_shape_same passed (2)"); - - ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist, - small_ds_buf_1); - - VRFY((ret >= 0), "H5Sread() slice from large ds succeeded."); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - /* verify that expected data is retrieved */ - - data_ok = true; - - expected_value = - (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[3] * large_dims[4]) + (large_sel_start[4])); - - start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size; - - stop_index = start_index + (int)small_ds_slice_size; - - assert(0 <= start_index); - assert(start_index < stop_index); - assert(stop_index <= (int)small_ds_size); - - ptr_1 = small_ds_buf_1; - - for (i = 0; i < start_index; i++) { - - if (*ptr_1 != (uint32_t)0) { - - data_ok = false; - *ptr_1 = (uint32_t)0; - } - - ptr_1++; - } - - VRFY((data_ok == true), "slice read from large ds data good(1)."); - - data_ok = lower_dim_size_comp_test__verify_data(ptr_1, -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - mpi_rank, -#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ - /* rank */ 2, - /* edge_size */ 10, - /* checker_edge_size */ 3, expected_value, - /* buf_starts_in_checker */ true); - - VRFY((data_ok == true), "slice read from large ds data good(2)."); - - data_ok = true; - - ptr_1 += small_ds_slice_size; - - for (i = stop_index; i < (int)small_ds_size; i++) { - - if (*ptr_1 != (uint32_t)0) { - -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { - fprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i, - (int)(*ptr_1)); - } -#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ - - data_ok = false; - *ptr_1 = (uint32_t)0; - } - - ptr_1++; - } - - VRFY((data_ok == true), "slice read from large ds data good(3)."); - - /* Close dataspaces */ - ret = H5Sclose(full_mem_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded"); - - ret = H5Sclose(full_file_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded"); - - ret = H5Sclose(mem_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded"); - - ret = H5Sclose(file_small_ds_sid); - VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded"); - - ret = H5Sclose(full_mem_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded"); - - ret = H5Sclose(full_file_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded"); - - ret = H5Sclose(mem_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded"); - - ret = H5Sclose(file_large_ds_sid); - VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded"); - - /* Close Datasets */ - ret = H5Dclose(small_dataset); - VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded"); - - ret = H5Dclose(large_dataset); - VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded"); - - /* close the file collectively */ - MESG("about to close file."); - ret = H5Fclose(fid); - VRFY((ret 
!= FAIL), "file close succeeded"); - - /* Free memory buffers */ - if (small_ds_buf_0 != NULL) - free(small_ds_buf_0); - if (small_ds_buf_1 != NULL) - free(small_ds_buf_1); - - if (large_ds_buf_0 != NULL) - free(large_ds_buf_0); - if (large_ds_buf_1 != NULL) - free(large_ds_buf_1); - - return; - -} /* lower_dim_size_comp_test__run_test() */ - -/*------------------------------------------------------------------------- - * Function: lower_dim_size_comp_test() - * - * Purpose: Test to see if an error in the computation of the size - * of the lower dimensions in H5S_obtain_datatype() has - * been corrected. - * - * Return: void - *------------------------------------------------------------------------- - */ - -void -lower_dim_size_comp_test(void) -{ - /* const char *fcnName = "lower_dim_size_comp_test()"; */ - int chunk_edge_size = 0; - int use_collective_io; - int mpi_rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); - for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) { - chunk_edge_size = 0; - lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT); - - chunk_edge_size = 5; - lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT); - } /* end for */ - - return; -} /* lower_dim_size_comp_test() */ - -/*------------------------------------------------------------------------- - * Function: link_chunk_collective_io_test() - * - * Purpose: Test to verify that an error in MPI type management in - * H5D_link_chunk_collective_io() has been corrected. - * In this bug, we used to free MPI types regardless of - * whether they were basic or derived. - * - * This test is based on a bug report kindly provided by - * Rob Latham of the MPICH team and ANL. - * - * The basic thrust of the test is to cause a process - * to participate in a collective I/O in which it: - * - * 1) Reads or writes exactly one chunk, - * - * 2) Has no in-memory buffer for any other chunk. - * - * The test differs from Rob Latham's bug report in - * that it runs with an arbitrary number of processes, - * and uses a 1-dimensional dataset. 
- * - * Return: void - *------------------------------------------------------------------------- - */ - -#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16 - -void -link_chunk_collective_io_test(void) -{ - /* const char *fcnName = "link_chunk_collective_io_test()"; */ - const char *filename; - bool mis_match = false; - int i; - int mrc; - int mpi_rank; - int mpi_size; - MPI_Comm mpi_comm = MPI_COMM_WORLD; - MPI_Info mpi_info = MPI_INFO_NULL; - hsize_t count[1] = {1}; - hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; - hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; - hsize_t start[1]; - hsize_t dims[1]; - hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; - herr_t ret; /* Generic return value */ - hid_t file_id; - hid_t acc_tpl; - hid_t dset_id; - hid_t file_ds_sid; - hid_t write_mem_ds_sid; - hid_t read_mem_ds_sid; - hid_t ds_dcpl_id; - hid_t xfer_plist; - double diff; - double expected_value; - double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; - double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* Make sure the connector supports the API functions being tested */ - if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { - if (MAINPROCESS) { - puts("SKIPPED"); - printf(" API functions for basic file or dataset aren't supported with this connector\n"); - fflush(stdout); - } - - return; - } - - assert(mpi_size > 0); - - /* get the file name */ - filename = (const char *)PARATESTFILE /* GetTestParameters() */; - assert(filename != NULL); - - /* setup file access template */ - acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded"); - - /* create the file collectively */ - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - MESG("File opened."); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); - - /* setup dims */ - dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE)); - - /* setup mem and file dataspaces */ - write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL); - VRFY((write_mem_ds_sid != 0), "H5Screate_simple() write_mem_ds_sid succeeded"); - - read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL); - VRFY((read_mem_ds_sid != 0), "H5Screate_simple() read_mem_ds_sid succeeded"); - - file_ds_sid = H5Screate_simple(1, dims, NULL); - VRFY((file_ds_sid != 0), "H5Screate_simple() file_ds_sid succeeded"); - - /* setup data set creation property list */ - ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); - VRFY((ds_dcpl_id != FAIL), "H5Pcreate() ds_dcpl_id succeeded"); - - ret = H5Pset_layout(ds_dcpl_id, H5D_CHUNKED); - VRFY((ret != FAIL), "H5Pset_layout() ds_dcpl_id succeeded"); - - ret = H5Pset_chunk(ds_dcpl_id, 1, chunk_dims); - VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); - - /* create the data set */ - dset_id = - H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded"); - - /* close the dataset creation property list */ - ret = H5Pclose(ds_dcpl_id); - VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded"); - - /* setup local data */ - expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * 
(double)(mpi_rank); - for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) { - - local_data_written[i] = expected_value; - local_data_read[i] = 0.0; - expected_value += 1.0; - } - - /* select the file and mem spaces */ - start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE); - ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded"); - - ret = H5Sselect_all(write_mem_ds_sid); - VRFY((ret != FAIL), "H5Sselect_all(mem_ds_sid) succeeded"); - - /* Note that we use NO SELECTION on the read memory dataspace */ - - /* setup xfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* write the data set */ - ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written); - - VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded"); - - /* sync with the other processes before checking data */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc == MPI_SUCCESS), "Sync after dataset write"); - - /* read this processes slice of the dataset back in */ - ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read); - VRFY((ret >= 0), "H5Dread() dataset read succeeded"); - - /* close the xfer property list */ - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded"); - - /* verify the data */ - mis_match = false; - for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) { - - diff = local_data_written[i] - local_data_read[i]; - diff = fabs(diff); - - if (diff >= 0.001) { - - mis_match = true; - } - } - VRFY((mis_match == false), "dataset data good."); - - /* Close dataspaces */ - ret = H5Sclose(write_mem_ds_sid); - VRFY((ret != FAIL), "H5Sclose(write_mem_ds_sid) succeeded"); - - ret = H5Sclose(read_mem_ds_sid); - VRFY((ret != FAIL), "H5Sclose(read_mem_ds_sid) succeeded"); - - ret = H5Sclose(file_ds_sid); - VRFY((ret != FAIL), "H5Sclose(file_ds_sid) succeeded"); - - /* Close Dataset */ - ret = H5Dclose(dset_id); - VRFY((ret != FAIL), "H5Dclose(dset_id) succeeded"); - - /* close the file collectively */ - ret = H5Fclose(file_id); - VRFY((ret != FAIL), "file close succeeded"); - - return; - -} /* link_chunk_collective_io_test() */ diff --git a/testpar/API/testphdf5.c b/testpar/API/testphdf5.c deleted file mode 100644 index 1d42c61028f..00000000000 --- a/testpar/API/testphdf5.c +++ /dev/null @@ -1,1006 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Main driver of the Parallel HDF5 tests - */ - -#include "hdf5.h" -#include "testphdf5.h" - -#ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ - -/* global variables */ -int dim0; -int dim1; -int chunkdim0; -int chunkdim1; -int nerrors = 0; /* errors count */ -int ndatasets = 300; /* number of datasets to create*/ -int ngroups = 512; /* number of groups to create in root - * group. */ -int facc_type = FACC_MPIO; /*Test file access type */ -int dxfer_coll_type = DXFER_COLLECTIVE_IO; - -H5E_auto2_t old_func; /* previous error handler */ -void *old_client_data; /* previous error handler arg.*/ - -/* other option flags */ - -/* FILENAME and filenames must have the same number of names. - * Use PARATESTFILE in general and use a separated filename only if the file - * created in one test is accessed by a different test. - * filenames[0] is reserved as the file name for PARATESTFILE. - */ -#define NFILENAME 2 -/* #define PARATESTFILE filenames[0] */ -const char *FILENAME[NFILENAME] = {"ParaTest.h5", NULL}; -char filenames[NFILENAME][PATH_MAX]; -hid_t fapl; /* file access property list */ - -#ifdef USE_PAUSE -/* pause the process for a moment to allow debugger to attach if desired. */ -/* Will pause more if greenlight file is not present but will eventually */ -/* continue. */ -#include -#include - -void -pause_proc(void) -{ - - int pid; - h5_stat_t statbuf; - char greenlight[] = "go"; - int maxloop = 10; - int loops = 0; - int time_int = 10; - - /* mpi variables */ - int mpi_size, mpi_rank; - int mpi_namelen; - char mpi_name[MPI_MAX_PROCESSOR_NAME]; - - pid = getpid(); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - MPI_Get_processor_name(mpi_name, &mpi_namelen); - - if (MAINPROCESS) - while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) { - if (!loops++) { - printf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, pid); - } - printf("waiting(%ds) for file %s ...\n", time_int, greenlight); - fflush(stdout); - HDsleep(time_int); - } - MPI_Barrier(MPI_COMM_WORLD); -} - -/* Use the Profile feature of MPI to call the pause_proc() */ -int -MPI_Init(int *argc, char ***argv) -{ - int ret_code; - ret_code = PMPI_Init(argc, argv); - pause_proc(); - return (ret_code); -} -#endif /* USE_PAUSE */ - -/* - * Show command usage - */ -static void -usage(void) -{ - printf(" [-r] [-w] [-m] [-n] " - "[-o] [-f ] [-d ]\n"); - printf("\t-m" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n" - "\tset number of groups for the multiple group test\n"); -#if 0 - printf("\t-f \tfilename prefix\n"); -#endif - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, - COL_FACTOR); - printf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); -} - -/* - * parse the command line options - */ -static int -parse_options(int argc, char **argv) -{ - int mpi_size, mpi_rank; /* mpi variables */ - - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - /* setup default chunk-size. 
Make sure sizes are > 0 */ - - chunkdim0 = (dim0 + 9) / 10; - chunkdim1 = (dim1 + 9) / 10; - - while (--argc) { - if (**(++argv) != '-') { - break; - } - else { - switch (*(*argv + 1)) { - case 'm': - ndatasets = atoi((*argv + 1) + 1); - if (ndatasets < 0) { - nerrors++; - return (1); - } - break; - case 'n': - ngroups = atoi((*argv + 1) + 1); - if (ngroups < 0) { - nerrors++; - return (1); - } - break; -#if 0 - case 'f': if (--argc < 1) { - nerrors++; - return(1); - } - if (**(++argv) == '-') { - nerrors++; - return(1); - } - paraprefix = *argv; - break; -#endif - case 'i': /* Collective MPI-IO access with independent IO */ - dxfer_coll_type = DXFER_INDEPENDENT_IO; - break; - case '2': /* Use the split-file driver with MPIO access */ - /* Can use $HDF5_METAPREFIX to define the */ - /* meta-file-prefix. */ - facc_type = FACC_MPIO | FACC_SPLIT; - break; - case 'd': /* dimensizes */ - if (--argc < 2) { - nerrors++; - return (1); - } - dim0 = atoi(*(++argv)) * mpi_size; - argc--; - dim1 = atoi(*(++argv)) * mpi_size; - /* set default chunkdim sizes too */ - chunkdim0 = (dim0 + 9) / 10; - chunkdim1 = (dim1 + 9) / 10; - break; - case 'c': /* chunk dimensions */ - if (--argc < 2) { - nerrors++; - return (1); - } - chunkdim0 = atoi(*(++argv)); - argc--; - chunkdim1 = atoi(*(++argv)); - break; - case 'h': /* print help message--return with nerrors set */ - return (1); - default: - printf("Illegal option(%s)\n", *argv); - nerrors++; - return (1); - } - } - } /*while*/ - - /* check validity of dimension and chunk sizes */ - if (dim0 <= 0 || dim1 <= 0) { - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); - nerrors++; - return (1); - } - if (chunkdim0 <= 0 || chunkdim1 <= 0) { - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); - nerrors++; - return (1); - } - - /* Make sure datasets can be divided into equal portions by the processes */ - if ((dim0 % mpi_size) || (dim1 % mpi_size)) { - if (MAINPROCESS) - printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); - nerrors++; - return (1); - } - - /* compose the test filenames */ - { - int i, n; - - n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - - for (i = 0; i < n; i++) - strncpy(filenames[i], FILENAME[i], PATH_MAX); -#if 0 /* no support for VFDs right now */ - if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) { - printf("h5_fixname failed\n"); - nerrors++; - return (1); - } -#endif - if (MAINPROCESS) { - printf("Test filenames are:\n"); - for (i = 0; i < n; i++) - printf(" %s\n", filenames[i]); - } - } - - return (0); -} - -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = -1; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO) { - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, true); - VRFY((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, true); - VRFY((ret >= 0), ""); - return (ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { - hid_t mpio_pl; - - mpio_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set 
Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return (ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - -int -main(int argc, char **argv) -{ - int mpi_size, mpi_rank; /* mpi variables */ - herr_t ret; - -#if 0 - H5Ptest_param_t ndsets_params, ngroups_params; - H5Ptest_param_t collngroups_params; - H5Ptest_param_t io_mode_confusion_params; - H5Ptest_param_t rr_obj_flush_confusion_params; -#endif - -#ifndef H5_HAVE_WIN32_API - /* Un-buffer the stdout and stderr */ - HDsetbuf(stderr, NULL); - HDsetbuf(stdout, NULL); -#endif - - MPI_Init(&argc, &argv); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - dim0 = ROW_FACTOR * mpi_size; - dim1 = COL_FACTOR * mpi_size; - - if (MAINPROCESS) { - printf("===================================\n"); - printf("PHDF5 TESTS START\n"); - printf("===================================\n"); - } - - /* Attempt to turn off atexit post processing so that in case errors - * happen during the test and the process is aborted, it will not get - * hung in the atexit post processing in which it may try to make MPI - * calls. By then, MPI calls may not work. - */ - if (H5dont_atexit() < 0) { - printf("Failed to turn off atexit processing. Continue.\n"); - }; - H5open(); - /* h5_show_hostname(); */ - -#if 0 - memset(filenames, 0, sizeof(filenames)); - for (int i = 0; i < NFILENAME; i++) { - if (NULL == (filenames[i] = malloc(PATH_MAX))) { - printf("couldn't allocate filename array\n"); - MPI_Abort(MPI_COMM_WORLD, -1); - } - } -#endif - - /* Set up file access property list with parallel I/O access */ - fapl = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl >= 0), "H5Pcreate succeeded"); - - vol_cap_flags_g = H5VL_CAP_FLAG_NONE; - - /* Get the capability flag of the VOL connector being used */ - ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g); - VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded"); - - /* Initialize testing framework */ - /* TestInit(argv[0], usage, parse_options); */ - - if (parse_options(argc, argv)) { - usage(); - return 1; - } - - /* Tests are generally arranged from least to most complexity... 
*/ -#if 0 - AddTest("mpiodup", test_fapl_mpio_dup, NULL, - "fapl_mpio duplicate", NULL); -#endif - - if (MAINPROCESS) { - printf("fapl_mpio duplicate\n"); - fflush(stdout); - } - test_fapl_mpio_dup(); - -#if 0 - AddTest("split", test_split_comm_access, NULL, - "dataset using split communicators", PARATESTFILE); - AddTest("props", test_file_properties, NULL, - "Coll Metadata file property settings", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("dataset using split communicators\n"); - fflush(stdout); - } - test_split_comm_access(); - - if (MAINPROCESS) { - printf("Coll Metadata file property settings\n"); - fflush(stdout); - } - test_file_properties(); - -#if 0 - AddTest("idsetw", dataset_writeInd, NULL, - "dataset independent write", PARATESTFILE); - AddTest("idsetr", dataset_readInd, NULL, - "dataset independent read", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("dataset independent write\n"); - fflush(stdout); - } - dataset_writeInd(); - if (MAINPROCESS) { - printf("dataset independent read\n"); - fflush(stdout); - } - dataset_readInd(); - -#if 0 - AddTest("cdsetw", dataset_writeAll, NULL, - "dataset collective write", PARATESTFILE); - AddTest("cdsetr", dataset_readAll, NULL, - "dataset collective read", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("dataset collective write\n"); - fflush(stdout); - } - dataset_writeAll(); - if (MAINPROCESS) { - printf("dataset collective read\n"); - fflush(stdout); - } - dataset_readAll(); - -#if 0 - AddTest("eidsetw", extend_writeInd, NULL, - "extendible dataset independent write", PARATESTFILE); - AddTest("eidsetr", extend_readInd, NULL, - "extendible dataset independent read", PARATESTFILE); - AddTest("ecdsetw", extend_writeAll, NULL, - "extendible dataset collective write", PARATESTFILE); - AddTest("ecdsetr", extend_readAll, NULL, - "extendible dataset collective read", PARATESTFILE); - AddTest("eidsetw2", extend_writeInd2, NULL, - "extendible dataset independent write #2", PARATESTFILE); - AddTest("selnone", none_selection_chunk, NULL, - "chunked dataset with none-selection", PARATESTFILE); - AddTest("calloc", test_chunk_alloc, NULL, - "parallel extend Chunked allocation on serial file", PARATESTFILE); - AddTest("fltread", test_filter_read, NULL, - "parallel read of dataset written serially with filters", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("extendible dataset independent write\n"); - fflush(stdout); - } - extend_writeInd(); - if (MAINPROCESS) { - printf("extendible dataset independent read\n"); - fflush(stdout); - } - extend_readInd(); - if (MAINPROCESS) { - printf("extendible dataset collective write\n"); - fflush(stdout); - } - extend_writeAll(); - if (MAINPROCESS) { - printf("extendible dataset collective read\n"); - fflush(stdout); - } - extend_readAll(); - if (MAINPROCESS) { - printf("extendible dataset independent write #2\n"); - fflush(stdout); - } - extend_writeInd2(); - if (MAINPROCESS) { - printf("chunked dataset with none-selection\n"); - fflush(stdout); - } - none_selection_chunk(); - if (MAINPROCESS) { - printf("parallel extend Chunked allocation on serial file\n"); - fflush(stdout); - } - test_chunk_alloc(); - if (MAINPROCESS) { - printf("parallel read of dataset written serially with filters\n"); - fflush(stdout); - } - test_filter_read(); - -#ifdef H5_HAVE_FILTER_DEFLATE -#if 0 - AddTest("cmpdsetr", compress_readAll, NULL, - "compressed dataset collective read", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("compressed dataset collective read\n"); - fflush(stdout); - } - 
compress_readAll(); -#endif /* H5_HAVE_FILTER_DEFLATE */ - -#if 0 - AddTest("zerodsetr", zero_dim_dset, NULL, - "zero dim dset", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("zero dim dset\n"); - fflush(stdout); - } - zero_dim_dset(); - -#if 0 - ndsets_params.name = PARATESTFILE; - ndsets_params.count = ndatasets; - AddTest("ndsetw", multiple_dset_write, NULL, - "multiple datasets write", &ndsets_params); -#endif - - if (MAINPROCESS) { - printf("multiple datasets write\n"); - fflush(stdout); - } - multiple_dset_write(); - -#if 0 - ngroups_params.name = PARATESTFILE; - ngroups_params.count = ngroups; - AddTest("ngrpw", multiple_group_write, NULL, - "multiple groups write", &ngroups_params); - AddTest("ngrpr", multiple_group_read, NULL, - "multiple groups read", &ngroups_params); -#endif - - if (MAINPROCESS) { - printf("multiple groups write\n"); - fflush(stdout); - } - multiple_group_write(); - if (MAINPROCESS) { - printf("multiple groups read\n"); - fflush(stdout); - } - multiple_group_read(); - -#if 0 - AddTest("compact", compact_dataset, NULL, - "compact dataset test", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("compact dataset test\n"); - fflush(stdout); - } - compact_dataset(); - -#if 0 - collngroups_params.name = PARATESTFILE; - collngroups_params.count = ngroups; - /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ - AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL, - "collective grp/dset write - independent grp/dset read", - &collngroups_params); -#ifndef H5_HAVE_WIN32_API - AddTest("bigdset", big_dataset, NULL, - "big dataset test", PARATESTFILE); -#else - printf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n"); -#endif -#endif - - if (MAINPROCESS) { - printf("collective grp/dset write - independent grp/dset read\n"); - fflush(stdout); - } - collective_group_write_independent_group_read(); - if (MAINPROCESS) { - printf("big dataset test\n"); - fflush(stdout); - } - big_dataset(); - -#if 0 - AddTest("fill", dataset_fillvalue, NULL, - "dataset fill value", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("dataset fill value\n"); - fflush(stdout); - } - dataset_fillvalue(); - -#if 0 - AddTest("cchunk1", - coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE); - AddTest("cchunk2", - coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE); - AddTest("cchunk3", - coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE); - AddTest("cchunk4", - coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("simple collective chunk io\n"); - fflush(stdout); - } - coll_chunk1(); - if (MAINPROCESS) { - printf("noncontiguous collective chunk io\n"); - fflush(stdout); - } - coll_chunk2(); - if (MAINPROCESS) { - printf("multi-chunk collective chunk io\n"); - fflush(stdout); - } - coll_chunk3(); - if (MAINPROCESS) { - printf("collective chunk io with partial non-selection\n"); - fflush(stdout); - } - coll_chunk4(); - - if ((mpi_size < 3) && MAINPROCESS) { - printf("Collective chunk IO optimization APIs "); - printf("needs at least 3 processes to participate\n"); - printf("Collective chunk IO API tests will be skipped \n"); - } - -#if 0 - AddTest((mpi_size <3)? "-cchunk5":"cchunk5" , - coll_chunk5,NULL, - "linked chunk collective IO without optimization",PARATESTFILE); - AddTest((mpi_size < 3)? 
"-cchunk6" : "cchunk6", - coll_chunk6,NULL, - "multi-chunk collective IO with direct request",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7", - coll_chunk7,NULL, - "linked chunk collective IO with optimization",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8", - coll_chunk8,NULL, - "linked chunk collective IO transferring to multi-chunk",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9", - coll_chunk9,NULL, - "multiple chunk collective IO with optimization",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10", - coll_chunk10,NULL, - "multiple chunk collective IO transferring to independent IO",PARATESTFILE); -#endif - - if (mpi_size >= 3) { - if (MAINPROCESS) { - printf("linked chunk collective IO without optimization\n"); - fflush(stdout); - } - coll_chunk5(); - if (MAINPROCESS) { - printf("multi-chunk collective IO with direct request\n"); - fflush(stdout); - } - coll_chunk6(); - if (MAINPROCESS) { - printf("linked chunk collective IO with optimization\n"); - fflush(stdout); - } - coll_chunk7(); - if (MAINPROCESS) { - printf("linked chunk collective IO transferring to multi-chunk\n"); - fflush(stdout); - } - coll_chunk8(); - if (MAINPROCESS) { - printf("multiple chunk collective IO with optimization\n"); - fflush(stdout); - } - coll_chunk9(); - if (MAINPROCESS) { - printf("multiple chunk collective IO transferring to independent IO\n"); - fflush(stdout); - } - coll_chunk10(); - } - -#if 0 - /* irregular collective IO tests*/ - AddTest("ccontw", - coll_irregular_cont_write,NULL, - "collective irregular contiguous write",PARATESTFILE); - AddTest("ccontr", - coll_irregular_cont_read,NULL, - "collective irregular contiguous read",PARATESTFILE); - AddTest("cschunkw", - coll_irregular_simple_chunk_write,NULL, - "collective irregular simple chunk write",PARATESTFILE); - AddTest("cschunkr", - coll_irregular_simple_chunk_read,NULL, - "collective irregular simple chunk read",PARATESTFILE); - AddTest("ccchunkw", - coll_irregular_complex_chunk_write,NULL, - "collective irregular complex chunk write",PARATESTFILE); - AddTest("ccchunkr", - coll_irregular_complex_chunk_read,NULL, - "collective irregular complex chunk read",PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("collective irregular contiguous write\n"); - fflush(stdout); - } - coll_irregular_cont_write(); - if (MAINPROCESS) { - printf("collective irregular contiguous read\n"); - fflush(stdout); - } - coll_irregular_cont_read(); - if (MAINPROCESS) { - printf("collective irregular simple chunk write\n"); - fflush(stdout); - } - coll_irregular_simple_chunk_write(); - if (MAINPROCESS) { - printf("collective irregular simple chunk read\n"); - fflush(stdout); - } - coll_irregular_simple_chunk_read(); - if (MAINPROCESS) { - printf("collective irregular complex chunk write\n"); - fflush(stdout); - } - coll_irregular_complex_chunk_write(); - if (MAINPROCESS) { - printf("collective irregular complex chunk read\n"); - fflush(stdout); - } - coll_irregular_complex_chunk_read(); - -#if 0 - AddTest("null", null_dataset, NULL, - "null dataset test", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("null dataset test\n"); - fflush(stdout); - } - null_dataset(); - -#if 0 - io_mode_confusion_params.name = PARATESTFILE; - io_mode_confusion_params.count = 0; /* value not used */ - - AddTest("I/Omodeconf", io_mode_confusion, NULL, - "I/O mode confusion test", - &io_mode_confusion_params); -#endif - - if (MAINPROCESS) { - printf("I/O mode confusion test\n"); - fflush(stdout); - } - 
io_mode_confusion(); - - if ((mpi_size < 3) && MAINPROCESS) { - printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); - printf("rr_obj_hdr_flush_confusion test will be skipped \n"); - } - - if (mpi_size > 2) { -#if 0 - rr_obj_flush_confusion_params.name = PARATESTFILE; - rr_obj_flush_confusion_params.count = 0; /* value not used */ - AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL, - "round robin object header flush confusion test", - &rr_obj_flush_confusion_params); -#endif - - if (MAINPROCESS) { - printf("round robin object header flush confusion test\n"); - fflush(stdout); - } - rr_obj_hdr_flush_confusion(); - } - -#if 0 - AddTest("alnbg1", - chunk_align_bug_1, NULL, - "Chunk allocation with alignment bug.", - PARATESTFILE); - - AddTest("tldsc", - lower_dim_size_comp_test, NULL, - "test lower dim size comp in span tree to mpi derived type", - PARATESTFILE); - - AddTest("lccio", - link_chunk_collective_io_test, NULL, - "test mpi derived type management", - PARATESTFILE); - - AddTest("actualio", actual_io_mode_tests, NULL, - "test actual io mode proprerty", - PARATESTFILE); - - AddTest("nocolcause", no_collective_cause_tests, NULL, - "test cause for broken collective io", - PARATESTFILE); - - AddTest("edpl", test_plist_ed, NULL, - "encode/decode Property Lists", NULL); -#endif - - if (MAINPROCESS) { - printf("Chunk allocation with alignment bug\n"); - fflush(stdout); - } - chunk_align_bug_1(); - if (MAINPROCESS) { - printf("test lower dim size comp in span tree to mpi derived type\n"); - fflush(stdout); - } - lower_dim_size_comp_test(); - if (MAINPROCESS) { - printf("test mpi derived type management\n"); - fflush(stdout); - } - link_chunk_collective_io_test(); - if (MAINPROCESS) { - printf("test actual io mode property - SKIPPED currently due to native-specific testing\n"); - fflush(stdout); - } - /* actual_io_mode_tests(); */ - if (MAINPROCESS) { - printf("test cause for broken collective io - SKIPPED currently due to native-specific testing\n"); - fflush(stdout); - } - /* no_collective_cause_tests(); */ - if (MAINPROCESS) { - printf("encode/decode Property Lists\n"); - fflush(stdout); - } - test_plist_ed(); - - if ((mpi_size < 2) && MAINPROCESS) { - printf("File Image Ops daisy chain test needs at least 2 processes.\n"); - printf("File Image Ops daisy chain test will be skipped \n"); - } - -#if 0 - AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, - "file image ops daisy chain", NULL); -#endif - - if (mpi_size >= 2) { - if (MAINPROCESS) { - printf("file image ops daisy chain - SKIPPED currently due to native-specific testing\n"); - fflush(stdout); - } - /* file_image_daisy_chain_test(); */ - } - - if ((mpi_size < 2) && MAINPROCESS) { - printf("Atomicity tests need at least 2 processes to participate\n"); - printf("8 is more recommended.. 
Atomicity tests will be skipped \n"); - } - else if (facc_type != FACC_MPIO && MAINPROCESS) { - printf("Atomicity tests will not work with a non MPIO VFD\n"); - } - else if (mpi_size >= 2 && facc_type == FACC_MPIO) { -#if 0 - AddTest("atomicity", dataset_atomicity, NULL, - "dataset atomic updates", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("dataset atomic updates - SKIPPED currently due to native-specific testing\n"); - fflush(stdout); - } - /* dataset_atomicity(); */ - } - -#if 0 - AddTest("denseattr", test_dense_attr, NULL, - "Store Dense Attributes", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("Store Dense Attributes\n"); - fflush(stdout); - } - test_dense_attr(); - -#if 0 - AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL, - "Collective Metadata read with some ranks having no selection", PARATESTFILE); - AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL, - "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE); - AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL, - "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); -#endif - - if (MAINPROCESS) { - printf("Collective Metadata read with some ranks having no selection\n"); - fflush(stdout); - } - test_partial_no_selection_coll_md_read(); - if (MAINPROCESS) { - printf("Collective MD read with multi chunk I/O\n"); - fflush(stdout); - } - test_multi_chunk_io_addrmap_issue(); - if (MAINPROCESS) { - printf("Collective MD read with link chunk I/O\n"); - fflush(stdout); - } - test_link_chunk_io_sort_chunk_issue(); - - /* Display testing information */ - /* TestInfo(argv[0]); */ - - /* setup file access property list */ - H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); - - /* Parse command line arguments */ - /* TestParseCmdLine(argc, argv); */ - - if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { - printf("===================================\n" - " Using Independent I/O with file set view to replace collective I/O \n" - "===================================\n"); - } - - /* Perform requested testing */ - /* PerformTests(); */ - - /* make sure all processes are finished before final report, cleanup - * and exit. 
- */ - MPI_Barrier(MPI_COMM_WORLD); - - /* Display test summary, if requested */ - /* if (MAINPROCESS && GetTestSummary()) - TestSummary(); */ - - /* Clean up test files */ - /* h5_clean_files(FILENAME, fapl); */ - H5Fdelete(FILENAME[0], fapl); - H5Pclose(fapl); - - /* nerrors += GetTestNumErrs(); */ - - /* Gather errors from all processes */ - { - int temp; - MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - nerrors = temp; - } - - if (MAINPROCESS) { /* only process 0 reports */ - printf("===================================\n"); - if (nerrors) - printf("***PHDF5 tests detected %d errors***\n", nerrors); - else - printf("PHDF5 tests finished successfully\n"); - printf("===================================\n"); - } - -#if 0 - for (int i = 0; i < NFILENAME; i++) { - free(filenames[i]); - filenames[i] = NULL; - } -#endif - - /* close HDF5 library */ - H5close(); - - /* Release test infrastructure */ - /* TestShutdown(); */ - - /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ - MPI_Finalize(); - - /* cannot just return (nerrors) because exit code is limited to 1byte */ - return (nerrors != 0); -} diff --git a/testpar/API/testphdf5.h b/testpar/API/testphdf5.h deleted file mode 100644 index 59dd5774e13..00000000000 --- a/testpar/API/testphdf5.h +++ /dev/null @@ -1,342 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://www.hdfgroup.org/licenses. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* common definitions used by all parallel hdf5 test programs. */ - -#ifndef PHDF5TEST_H -#define PHDF5TEST_H - -#include "H5private.h" -#include "testpar.h" - -/* - * Define parameters for various tests since we do not have access to - * passing parameters to tests via the testphdf5 test framework. - */ -#define PARATESTFILE "ParaTest.h5" -#define NDATASETS 300 -#define NGROUPS 256 - -/* Disable express testing by default */ -#define EXPRESS_MODE 0 - -enum H5TEST_COLL_CHUNK_API { - API_NONE = 0, - API_LINK_HARD, - API_MULTI_HARD, - API_LINK_TRUE, - API_LINK_FALSE, - API_MULTI_COLL, - API_MULTI_IND -}; - -#ifndef false -#define false 0 -#endif - -#ifndef true -#define true 1 -#endif - -/* Constants definitions */ -#define DIM0 600 /* Default dataset sizes. 
*/ -#define DIM1 1200 /* Values are from a monitor pixel sizes */ -#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ -#define COL_FACTOR 16 /* Nominal column factor for dataset size */ -#define RANK 2 -#define DATASETNAME1 "Data1" -#define DATASETNAME2 "Data2" -#define DATASETNAME3 "Data3" -#define DATASETNAME4 "Data4" -#define DATASETNAME5 "Data5" -#define DATASETNAME6 "Data6" -#define DATASETNAME7 "Data7" -#define DATASETNAME8 "Data8" -#define DATASETNAME9 "Data9" - -/* point selection order */ -#define IN_ORDER 1 -#define OUT_OF_ORDER 2 - -/* Hyperslab layout styles */ -#define BYROW 1 /* divide into slabs of rows */ -#define BYCOL 2 /* divide into blocks of columns */ -#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */ -#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */ - -/* File_Access_type bits */ -#define FACC_DEFAULT 0x0 /* default */ -#define FACC_MPIO 0x1 /* MPIO */ -#define FACC_SPLIT 0x2 /* Split File */ - -#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ -#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ -/*Constants for collective chunk definitions */ -#define SPACE_DIM1 24 -#define SPACE_DIM2 4 -#define BYROW_CONT 1 -#define BYROW_DISCONT 2 -#define BYROW_SELECTNONE 3 -#define BYROW_SELECTUNBALANCE 4 -#define BYROW_SELECTINCHUNK 5 - -#define DIMO_NUM_CHUNK 4 -#define DIM1_NUM_CHUNK 2 -#define LINK_TRUE_NUM_CHUNK 2 -#define LINK_FALSE_NUM_CHUNK 6 -#define MULTI_TRUE_PERCENT 50 -#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_TRUE" -#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_FALSE" -#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard" -#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard" -#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll" -#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp" - -#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name" - -/*Constants for MPI derived data type generated from span tree */ - -#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */ -#define MSPACE1_DIM 27000 /* Dataset size in memory */ -#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */ -#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */ -#define FSPACE_DIM2 3600 -/* We will read dataset back from the file to the dataset in memory with these dataspace parameters. 
*/ -#define MSPACE_RANK 2 -#define MSPACE_DIM1 9 -#define MSPACE_DIM2 3600 -#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/ -#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/ -#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/ -#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/ -#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/ -#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/ -#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/ -#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/ - -#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/ -#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/ -#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ -#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ -#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/ -#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/ -#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/ -#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/ - -#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/ -#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ -#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ -#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/ - -#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ -#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/ -#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ -#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ -#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ -#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ -#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/ -#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/ - -#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ -#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/ -#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ -#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ -#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ -#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ -#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/ -#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/ - -#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ -#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/ -#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab 
selection*/ -#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ -#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ -#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ -#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/ -#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/ - -#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/ -#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/ -#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/ -#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/ -#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/ -#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/ -#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/ -#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/ - -#define NPOINTS \ - 4 /* Number of points that will be selected \ - and overwritten */ - -/* Definitions of the selection mode for the test_actual_io_function. */ -#define TEST_ACTUAL_IO_NO_COLLECTIVE 0 -#define TEST_ACTUAL_IO_RESET 1 -#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2 -#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3 -#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4 -#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5 -#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6 -#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7 -#define TEST_ACTUAL_IO_LINK_CHUNK 8 -#define TEST_ACTUAL_IO_CONTIGUOUS 9 - -/* Definitions of the selection mode for the no_collective_cause_tests function. 
*/ -#define TEST_COLLECTIVE 0x001 -#define TEST_SET_INDEPENDENT 0x002 -#define TEST_DATATYPE_CONVERSION 0x004 -#define TEST_DATA_TRANSFORMS 0x008 -#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010 -#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020 -#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040 - -/* Don't erase these lines, they are put here for debugging purposes */ -/* -#define MSPACE1_RANK 1 -#define MSPACE1_DIM 50 -#define MSPACE2_RANK 1 -#define MSPACE2_DIM 4 -#define FSPACE_RANK 2 -#define FSPACE_DIM1 8 -#define FSPACE_DIM2 12 -#define MSPACE_RANK 2 -#define MSPACE_DIM1 8 -#define MSPACE_DIM2 9 -#define NPOINTS 4 -*/ /* end of debugging macro */ - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY -/* Collective chunk instrumentation properties */ -#define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard" -#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard" -#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_TRUE" -#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_FALSE" -#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll" -#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind" - -/* Definitions for all collective chunk instrumentation properties */ -#define H5D_XFER_COLL_CHUNK_SIZE sizeof(unsigned) -#define H5D_XFER_COLL_CHUNK_DEF 1 - -/* General collective I/O instrumentation properties */ -#define H5D_XFER_COLL_RANK0_BCAST_NAME "coll_rank0_bcast" - -/* Definitions for general collective I/O instrumentation properties */ -#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(bool) -#define H5D_XFER_COLL_RANK0_BCAST_DEF false -#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - -/* type definitions */ -typedef struct H5Ptest_param_t /* holds extra test parameters */ -{ - char *name; - int count; -} H5Ptest_param_t; - -/* Dataset data type. Int's can be easily octo dumped. 
*/ -typedef int DATATYPE; - -/* Shape Same Tests Definitions */ -typedef enum { - IND_CONTIG, /* Independent IO on contiguous datasets */ - COL_CONTIG, /* Collective IO on contiguous datasets */ - IND_CHUNKED, /* Independent IO on chunked datasets */ - COL_CHUNKED /* Collective IO on chunked datasets */ -} ShapeSameTestMethods; - -/* Shared global variables */ -extern int dim0, dim1; /*Dataset dimensions */ -extern int chunkdim0, chunkdim1; /*Chunk dimensions */ -extern int nerrors; /*errors count */ -extern H5E_auto2_t old_func; /* previous error handler */ -extern void *old_client_data; /*previous error handler arg.*/ -extern int facc_type; /*Test file access type */ -extern int dxfer_coll_type; - -/* Test program prototypes */ -void test_plist_ed(void); -#if 0 -void external_links(void); -#endif -void zero_dim_dset(void); -void test_file_properties(void); -void test_delete(void); -void multiple_dset_write(void); -void multiple_group_write(void); -void multiple_group_read(void); -void collective_group_write_independent_group_read(void); -void collective_group_write(void); -void independent_group_read(void); -void test_fapl_mpio_dup(void); -void test_split_comm_access(void); -void test_page_buffer_access(void); -void dataset_atomicity(void); -void dataset_writeInd(void); -void dataset_writeAll(void); -void extend_writeInd(void); -void extend_writeInd2(void); -void extend_writeAll(void); -void dataset_readInd(void); -void dataset_readAll(void); -void extend_readInd(void); -void extend_readAll(void); -void none_selection_chunk(void); -void actual_io_mode_tests(void); -void no_collective_cause_tests(void); -void test_chunk_alloc(void); -void test_filter_read(void); -void compact_dataset(void); -void null_dataset(void); -void big_dataset(void); -void dataset_fillvalue(void); -void coll_chunk1(void); -void coll_chunk2(void); -void coll_chunk3(void); -void coll_chunk4(void); -void coll_chunk5(void); -void coll_chunk6(void); -void coll_chunk7(void); -void coll_chunk8(void); -void coll_chunk9(void); -void coll_chunk10(void); -void coll_irregular_cont_read(void); -void coll_irregular_cont_write(void); -void coll_irregular_simple_chunk_read(void); -void coll_irregular_simple_chunk_write(void); -void coll_irregular_complex_chunk_read(void); -void coll_irregular_complex_chunk_write(void); -void io_mode_confusion(void); -void rr_obj_hdr_flush_confusion(void); -void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm); -void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm); -void chunk_align_bug_1(void); -void lower_dim_size_comp_test(void); -void link_chunk_collective_io_test(void); -void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type); -void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type); -void file_image_daisy_chain_test(void); -#ifdef H5_HAVE_FILTER_DEFLATE -void compress_readAll(void); -#endif /* H5_HAVE_FILTER_DEFLATE */ -void test_dense_attr(void); -void test_partial_no_selection_coll_md_read(void); -void test_multi_chunk_io_addrmap_issue(void); -void test_link_chunk_io_sort_chunk_issue(void); -void test_collective_global_heap_write(void); - -/* commonly used prototypes */ -hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type); -MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info); -int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, - DATATYPE *original); -void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, - 
hsize_t coords[], int order); -#endif /* PHDF5TEST_H */ diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index 2726f91b55c..910c7a2612b 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -1,3 +1,14 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include "hdf5.h" #include "testphdf5.h" @@ -1854,7 +1865,8 @@ main(int argc, char **argv) { hsize_t newsize = 1048576; /* Set the bigio processing limit to be 'newsize' bytes */ - hsize_t oldsize = H5_mpi_set_bigio_count(newsize); + hsize_t oldsize = H5_mpi_set_bigio_count(newsize); + hid_t acc_plist = H5I_INVALID_HID; /* Having set the bigio handling to a size that is manageable, * we'll set our 'bigcount' variable to be 2X that limit so @@ -1879,6 +1891,30 @@ main(int argc, char **argv) /* set alarm. */ TestAlarmOn(); + acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + + /* Get the capability flag of the VOL connector being used */ + if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) { + if (MAIN_PROCESS) + printf("Failed to get the capability flag of the VOL connector being used\n"); + + MPI_Finalize(); + return -1; + } + + /* Make sure the connector supports the API functions being tested. This test only + * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close, + * and H5Dget_space. */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAIN_PROCESS) + printf( + "API functions for basic file, dataset basic or more aren't supported with this connector\n"); + + MPI_Finalize(); + return 0; + } + dataset_big_write(); MPI_Barrier(MPI_COMM_WORLD); @@ -1900,9 +1936,6 @@ main(int argc, char **argv) H5_mpi_set_bigio_count(oldsize); single_rank_independent_io(); - /* turn off alarm */ - TestAlarmOff(); - if (mpi_rank_g == 0) { hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS); @@ -1926,6 +1959,11 @@ main(int argc, char **argv) printf("==================================================\n"); } + H5Pclose(acc_plist); + + /* turn off alarm */ + TestAlarmOff(); + /* close HDF5 library */ H5close(); diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c index d02951d54ee..1d5978306a8 100644 --- a/testpar/t_chunk_alloc.c +++ b/testpar/t_chunk_alloc.c @@ -80,6 +80,8 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ /* Only MAINPROCESS should create the file. Others just wait. */ if (MAINPROCESS) { + bool vol_is_native; + nchunks = chunk_factor * mpi_size; dims[0] = (hsize_t)(nchunks * CHUNK_SIZE); /* Create the data space with unlimited dimensions. 
*/ @@ -93,6 +95,9 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); VRFY((file_id >= 0), "H5Fcreate"); + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native) >= 0), "h5_using_native_vol"); + /* Modify dataset creation properties, i.e. enable chunking */ cparms = H5Pcreate(H5P_DATASET_CREATE); VRFY((cparms >= 0), ""); @@ -142,10 +147,12 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ VRFY((hrc >= 0), ""); file_id = -1; - /* verify file size */ - filesize = get_filesize(filename); - est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); - VRFY((filesize >= est_filesize), "file size check"); + if (vol_is_native) { + /* verify file size */ + filesize = get_filesize(filename); + est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); + VRFY((filesize >= est_filesize), "file size check"); + } } /* Make sure all processes are done before exiting this routine. Otherwise, @@ -187,6 +194,8 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti MPI_Offset filesize, /* actual file size */ est_filesize; /* estimated file size */ + bool vol_is_native; + /* Initialize MPI */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -206,12 +215,20 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti VRFY((*file_id >= 0), ""); } + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, *file_id, &vol_is_native) >= 0), "h5_using_native_vol"); + /* Open dataset*/ if (*dataset < 0) { *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); VRFY((*dataset >= 0), ""); } + /* Make sure all processes are done before continuing. Otherwise, one + * process could change the dataset extent before another finishes opening + * it, resulting in only some of the processes calling H5Dset_extent(). 
*/ + MPI_Barrier(MPI_COMM_WORLD); + memspace = H5Screate_simple(1, chunk_dims, NULL); VRFY((memspace >= 0), ""); @@ -277,10 +294,12 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti VRFY((hrc >= 0), ""); *file_id = -1; - /* verify file size */ - filesize = get_filesize(filename); - est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); - VRFY((filesize >= est_filesize), "file size check"); + if (vol_is_native) { + /* verify file size */ + filesize = get_filesize(filename); + est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); + VRFY((filesize >= est_filesize), "file size check"); + } /* Can close some plists */ hrc = H5Pclose(access_plist); @@ -448,6 +467,19 @@ test_chunk_alloc(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + filename = (const char *)GetTestParameters(); if (VERBOSE_MED) printf("Extend Chunked allocation test on file %s\n", filename); @@ -530,6 +562,7 @@ test_chunk_alloc_incr_ser_to_par(void) int *data = NULL; int *correct_data = NULL; int *read_data = NULL; + bool vol_is_native; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -598,6 +631,9 @@ test_chunk_alloc_incr_ser_to_par(void) fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); VRFY((fid >= 0), "H5Fopen"); + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, fid, &vol_is_native) >= 0), "h5_using_native_vol"); + data = malloc((dset_dims[0] / (hsize_t)mpi_size) * sizeof(int)); VRFY(data, "malloc"); read_data = malloc(dset_dims[0] * sizeof(int)); @@ -613,13 +649,17 @@ test_chunk_alloc_incr_ser_to_par(void) dset_id = H5Dopen2(fid, "dset_no_filter", H5P_DEFAULT); VRFY((dset_id >= 0), "H5Dopen2"); - ret = H5Dget_space_status(dset_id, &space_status); - VRFY((ret == SUCCEED), "H5Dread"); + if (vol_is_native) { + ret = H5Dget_space_status(dset_id, &space_status); + VRFY((ret == SUCCEED), "H5Dread"); - VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "file space allocation status verification succeeded"); + VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), + "file space allocation status verification succeeded"); - alloc_size = H5Dget_storage_size(dset_id); - VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), "file space allocation size verification succeeded"); + alloc_size = H5Dget_storage_size(dset_id); + VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), + "file space allocation size verification succeeded"); + } memset(read_data, 255, dset_dims[0] * sizeof(int)); memset(correct_data, 0, dset_dims[0] * sizeof(int)); @@ -649,13 +689,17 @@ test_chunk_alloc_incr_ser_to_par(void) MPI_Barrier(MPI_COMM_WORLD); - ret = H5Dget_space_status(dset_id, &space_status); - VRFY((ret == SUCCEED), "H5Dread"); + if (vol_is_native) { + ret = H5Dget_space_status(dset_id, &space_status); + VRFY((ret == SUCCEED), "H5Dread"); - VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "file space allocation status verification succeeded"); + VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), + "file space 
allocation status verification succeeded"); - alloc_size = H5Dget_storage_size(dset_id); - VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), "file space allocation size verification succeeded"); + alloc_size = H5Dget_storage_size(dset_id); + VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), + "file space allocation size verification succeeded"); + } memset(read_data, 0, dset_dims[0] * sizeof(int)); memset(correct_data, 255, dset_dims[0] * sizeof(int)); @@ -680,14 +724,16 @@ test_chunk_alloc_incr_ser_to_par(void) dset_id = H5Dopen2(fid, "dset_filter", H5P_DEFAULT); VRFY((dset_id >= 0), "H5Dopen2"); - ret = H5Dget_space_status(dset_id, &space_status); - VRFY((ret == SUCCEED), "H5Dread"); + if (vol_is_native) { + ret = H5Dget_space_status(dset_id, &space_status); + VRFY((ret == SUCCEED), "H5Dread"); - VRFY((space_status == H5D_SPACE_STATUS_NOT_ALLOCATED), - "file space allocation status verification succeeded"); + VRFY((space_status == H5D_SPACE_STATUS_NOT_ALLOCATED), + "file space allocation status verification succeeded"); - alloc_size = H5Dget_storage_size(dset_id); - VRFY((0 == alloc_size), "file space allocation size verification succeeded"); + alloc_size = H5Dget_storage_size(dset_id); + VRFY((0 == alloc_size), "file space allocation size verification succeeded"); + } memset(read_data, 255, dset_dims[0] * sizeof(int)); memset(correct_data, 0, dset_dims[0] * sizeof(int)); @@ -723,13 +769,17 @@ test_chunk_alloc_incr_ser_to_par(void) MPI_Barrier(MPI_COMM_WORLD); - ret = H5Dget_space_status(dset_id, &space_status); - VRFY((ret == SUCCEED), "H5Dread"); + if (vol_is_native) { + ret = H5Dget_space_status(dset_id, &space_status); + VRFY((ret == SUCCEED), "H5Dread"); - VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "file space allocation status verification succeeded"); + VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), + "file space allocation status verification succeeded"); - alloc_size = H5Dget_storage_size(dset_id); - VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), "file space allocation size verification succeeded"); + alloc_size = H5Dget_storage_size(dset_id); + VRFY(((dset_dims[0] * sizeof(int)) == alloc_size), + "file space allocation size verification succeeded"); + } memset(read_data, 0, dset_dims[0] * sizeof(int)); memset(correct_data, 255, dset_dims[0] * sizeof(int)); diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index 1ff7a8e2c15..fa3459d252f 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -67,6 +67,22 @@ void coll_chunk1(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -113,6 +129,22 @@ void coll_chunk2(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + 
!(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -161,8 +193,24 @@ coll_chunk3(void) { const char *filename = GetTestParameters(); int mpi_size; + int mpi_rank; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); @@ -209,6 +257,22 @@ void coll_chunk4(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -256,6 +320,22 @@ void coll_chunk5(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER); @@ -305,6 +385,22 @@ void coll_chunk6(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, 
OUT_OF_ORDER); @@ -352,6 +448,22 @@ void coll_chunk7(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER); @@ -399,6 +511,22 @@ void coll_chunk8(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER); @@ -446,6 +574,22 @@ void coll_chunk9(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER); @@ -493,6 +637,22 @@ void coll_chunk10(void) { const char *filename = GetTestParameters(); + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER); @@ -506,15 +666,15 @@ coll_chunk10(void) } /*------------------------------------------------------------------------- - * Function: coll_chunktest + * Function: coll_chunktest * * Purpose: The real testing routine for regular selection of collective * chunking storage testing both write and read, * If anything fails, it may be read or write. There is no * separation test between read and write. 
* - * Return: Success: 0 - * Failure: -1 + * Return: Success: 0 + * Failure: -1 * *------------------------------------------------------------------------- */ diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c index 9c6fc7120cf..043ecf81208 100644 --- a/testpar/t_coll_md.c +++ b/testpar/t_coll_md.c @@ -89,6 +89,19 @@ test_partial_no_selection_coll_md_read(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + filename = GetTestParameters(); fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -271,6 +284,19 @@ test_multi_chunk_io_addrmap_issue(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + filename = GetTestParameters(); fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -388,6 +414,19 @@ test_link_chunk_io_sort_chunk_issue(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + filename = GetTestParameters(); fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -531,6 +570,19 @@ test_collective_global_heap_write(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset or file flush aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + filename = GetTestParameters(); fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); diff --git a/testpar/t_dset.c b/testpar/t_dset.c index 83d751120e6..67d11d21513 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -301,6 +301,19 @@ dataset_writeInd(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, 
or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* allocate memory for data buffer */ data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); @@ -439,6 +452,19 @@ dataset_readInd(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* allocate memory for data buffer */ data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); @@ -569,6 +595,19 @@ dataset_writeAll(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* set up the coords array selection */ num_points = (size_t)dim1; coords = (hsize_t *)malloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t)); @@ -1085,6 +1124,19 @@ dataset_readAll(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* set up the coords array selection */ num_points = (size_t)dim1; coords = (hsize_t *)malloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t)); @@ -1499,6 +1551,19 @@ extend_writeInd(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* setup chunk-size. 
Make sure sizes are > 0 */ chunk_dims[0] = (hsize_t)chunkdim0; chunk_dims[1] = (hsize_t)chunkdim1; @@ -1714,6 +1779,19 @@ extend_writeInd2(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* ------------------- * START AN HDF5 FILE * -------------------*/ @@ -1877,6 +1955,19 @@ extend_readInd(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* allocate memory for data buffer */ data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); @@ -2058,6 +2149,19 @@ extend_writeAll(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* setup chunk-size. 
Make sure sizes are > 0 */ chunk_dims[0] = (hsize_t)chunkdim0; chunk_dims[1] = (hsize_t)chunkdim1; @@ -2295,6 +2399,19 @@ extend_readAll(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* allocate memory for data buffer */ data_array1 = (DATATYPE *)malloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); @@ -2485,6 +2602,17 @@ compress_readAll(void) MPI_Comm_size(comm, &mpi_size); MPI_Comm_rank(comm, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + /* Allocate data buffer */ data_orig = (DATATYPE *)malloc((size_t)dim * sizeof(DATATYPE)); VRFY((data_orig != NULL), "data_origin1 malloc succeeded"); @@ -2677,6 +2805,17 @@ none_selection_chunk(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + /* setup chunk-size. 
Make sure sizes are > 0 */ chunk_dims[0] = (hsize_t)chunkdim0; chunk_dims[1] = (hsize_t)chunkdim1; @@ -2954,6 +3093,17 @@ test_actual_io_mode(int selection_mode) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + MPI_Barrier(MPI_COMM_WORLD); assert(mpi_size >= 1); @@ -3474,6 +3624,19 @@ test_no_collective_cause_mode(int selection_mode) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + MPI_Barrier(MPI_COMM_WORLD); assert(mpi_size >= 1); @@ -3739,8 +3902,6 @@ test_no_collective_cause_mode(int selection_mode) /* Release some resources */ if (sid) H5Sclose(sid); - if (fapl) - H5Pclose(fapl); if (dcpl) H5Pclose(dcpl); if (dxpl_write) @@ -3759,7 +3920,10 @@ test_no_collective_cause_mode(int selection_mode) /* clean up external file */ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) - HDremove(FILE_EXTERNAL); + H5Fdelete(FILE_EXTERNAL, fapl); + + if (fapl) + H5Pclose(fapl); return; } @@ -3845,6 +4009,19 @@ dataset_atomicity(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, basic dataset, or more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + buf_size = dim0 * dim1; /* allocate memory for data buffer */ write_buf = (int *)calloc((size_t)buf_size, sizeof(int)); @@ -4151,14 +4328,27 @@ test_dense_attr(void) herr_t status; const char *filename; - /* get filename */ - filename = (const char *)GetTestParameters(); - assert(filename != NULL); - /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, group, dataset, or attribute aren't supported with " + "this connector\n"); + fflush(stdout); + } + + return; + } + + /* get filename */ + filename = (const char *)GetTestParameters(); + assert(filename != NULL); + fpid = H5Pcreate(H5P_FILE_ACCESS); VRFY((fpid > 0), "H5Pcreate succeeded"); status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); diff --git a/testpar/t_file.c b/testpar/t_file.c index 8f8b2914a70..493e6d2d194 100644 --- 
a/testpar/t_file.c +++ b/testpar/t_file.c @@ -71,6 +71,18 @@ test_split_comm_access(void) /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + is_old = mpi_rank % 2; mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm); VRFY((mrc == MPI_SUCCESS), ""); @@ -771,13 +783,25 @@ test_file_properties(void) int mpi_ret; /* MPI return value */ int cmp; /* Compare value */ - filename = (const char *)GetTestParameters(); - /* set up MPI parameters */ mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded"); mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + filename = (const char *)GetTestParameters(); + mpi_ret = MPI_Info_create(&info); VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz"); @@ -964,6 +988,18 @@ test_delete(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or file more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* setup file access plist */ fapl_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c index 755831b7fd2..1790685cfe0 100644 --- a/testpar/t_file_image.c +++ b/testpar/t_file_image.c @@ -84,6 +84,20 @@ file_image_daisy_chain_test(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* setup file name */ snprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank); diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index 01695abab1c..f001cc9e144 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -52,7 +52,8 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) hsize_t hs_size[2]; /* Hyperslab size */ size_t i, j; /* Local index variables */ char name[32] = "dataset"; - herr_t hrc; /* Error status */ + herr_t hrc; /* Error status */ + bool vol_is_native; int *points = NULL; /* Writing buffer for entire dataset */ int *check = NULL; /* Reading buffer 
for selected hyperslab */ @@ -93,6 +94,9 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); VRFY(file >= 0, "H5Fcreate"); + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native) >= 0), "h5_using_native_vol"); + /* Create the dataset */ dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); VRFY(dataset >= 0, "H5Dcreate2"); @@ -100,8 +104,10 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points); VRFY(hrc >= 0, "H5Dwrite"); - *dset_size = H5Dget_storage_size(dataset); - VRFY(*dset_size > 0, "H5Dget_storage_size"); + if (vol_is_native) { + *dset_size = H5Dget_storage_size(dataset); + VRFY(*dset_size > 0, "H5Dget_storage_size"); + } hrc = H5Dclose(dataset); VRFY(hrc >= 0, "H5Dclose"); @@ -124,6 +130,9 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) file = H5Fopen(filename, H5F_ACC_RDWR, access_plist); VRFY((file >= 0), "H5Fopen"); + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, file, &vol_is_native) >= 0), "h5_using_native_vol"); + dataset = H5Dopen2(file, name, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dopen2"); @@ -150,9 +159,11 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) } } - /* Get the storage size of the dataset */ - *dset_size = H5Dget_storage_size(dataset); - VRFY(*dset_size != 0, "H5Dget_storage_size"); + if (vol_is_native) { + /* Get the storage size of the dataset */ + *dset_size = H5Dget_storage_size(dataset); + VRFY(*dset_size != 0, "H5Dget_storage_size"); + } /* Clean up objects used for this test */ hrc = H5Dclose(dataset); @@ -194,9 +205,8 @@ test_filter_read(void) unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ herr_t hrc; const char *filename; -#ifdef H5_HAVE_FILTER_FLETCHER32 - hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ -#endif + bool vol_is_native; + hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ #ifdef H5_HAVE_FILTER_DEFLATE hsize_t deflate_size; /* Size of dataset with deflate filter */ @@ -208,7 +218,7 @@ test_filter_read(void) unsigned szip_pixels_per_block = 4; #endif /* H5_HAVE_FILTER_SZIP */ - hsize_t shuffle_size; /* Size of dataset with shuffle filter */ + hsize_t shuffle_size = 0; /* Size of dataset with shuffle filter */ #if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP) hsize_t combo_size; /* Size of dataset with multiple filters */ @@ -219,6 +229,24 @@ test_filter_read(void) if (VERBOSE_MED) printf("Parallel reading of dataset written with filters %s\n", filename); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf( + " API functions for basic file, dataset or filter aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + + /* Check if native VOL is being used */ + VRFY(h5_using_native_vol(H5P_DEFAULT, H5I_INVALID_HID, &vol_is_native) >= 0, "h5_using_native_vol"); + /*---------------------------------------------------------- * STEP 0: Test without filters. 
*---------------------------------------------------------- @@ -258,7 +286,6 @@ test_filter_read(void) * STEP 1: Test Fletcher32 Checksum by itself. *---------------------------------------------------------- */ -#ifdef H5_HAVE_FILTER_FLETCHER32 dc = H5Pcreate(H5P_DATASET_CREATE); VRFY(dc >= 0, "H5Pset_filter"); @@ -273,14 +300,14 @@ test_filter_read(void) VRFY(hrc >= 0, "H5Pset_filter"); filter_read_internal(filename, dc, &fletcher32_size); - VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect."); + + if (vol_is_native) + VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect."); /* Clean up objects used for this test */ hrc = H5Pclose(dc); VRFY(hrc >= 0, "H5Pclose"); -#endif /* H5_HAVE_FILTER_FLETCHER32 */ - /*---------------------------------------------------------- * STEP 2: Test deflation by itself. *---------------------------------------------------------- @@ -349,7 +376,9 @@ test_filter_read(void) VRFY(hrc >= 0, "H5Pset_shuffle"); filter_read_internal(filename, dc, &shuffle_size); - VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size."); + + if (vol_is_native) + VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size."); /* Clean up objects used for this test */ hrc = H5Pclose(dc); diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c index 582e4412c7f..b9cb4cc5729 100644 --- a/testpar/t_mdset.c +++ b/testpar/t_mdset.c @@ -84,6 +84,17 @@ zero_dim_dset(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + filename = GetTestParameters(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -156,6 +167,17 @@ multiple_dset_write(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + outme = malloc((size_t)size * (size_t)size * sizeof(double)); VRFY((outme != NULL), "malloc succeeded for outme"); @@ -235,6 +257,17 @@ compact_dataset(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + outme = malloc((size_t)((size_t)size * (size_t)size * sizeof(double))); VRFY((outme != NULL), "malloc succeeded for outme"); @@ -357,6 +390,19 @@ null_dataset(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & 
H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or attribute aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + filename = GetTestParameters(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -456,12 +502,24 @@ big_dataset(void) hsize_t file_dims[4]; /* Dimensions of dataspace */ char dname[] = "dataset"; /* Name of dataset */ MPI_Offset file_size; /* Size of file on disk */ - herr_t ret; /* Generic return value */ + bool vol_is_native; + herr_t ret; /* Generic return value */ const char *filename; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + /* Verify MPI_Offset can handle larger than 2GB sizes */ VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4"); @@ -476,6 +534,9 @@ big_dataset(void) iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((iof >= 0), "H5Fcreate succeeded"); + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, iof, &vol_is_native) >= 0), "h5_using_native_vol"); + /* Define dataspace for 2GB dataspace */ file_dims[0] = 2; file_dims[1] = 1024; @@ -495,9 +556,11 @@ big_dataset(void) ret = H5Fclose(iof); VRFY((ret >= 0), "H5Fclose succeeded"); - /* Check that file of the correct size was created */ - file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)"); + if (vol_is_native) { + /* Check that file of the correct size was created */ + file_size = h5_get_file_size(filename, fapl); + VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)"); + } /* * Create >4GB HDF5 file @@ -524,9 +587,11 @@ big_dataset(void) ret = H5Fclose(iof); VRFY((ret >= 0), "H5Fclose succeeded"); - /* Check that file of the correct size was created */ - file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)"); + if (vol_is_native) { + /* Check that file of the correct size was created */ + file_size = h5_get_file_size(filename, fapl); + VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)"); + } /* * Create >8GB HDF5 file @@ -553,9 +618,11 @@ big_dataset(void) ret = H5Fclose(iof); VRFY((ret >= 0), "H5Fclose succeeded"); - /* Check that file of the correct size was created */ - file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)"); + if (vol_is_native) { + /* Check that file of the correct size was created */ + file_size = h5_get_file_size(filename, fapl); + VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)"); + } /* Close fapl */ ret = H5Pclose(fapl); @@ -594,6 +661,17 @@ dataset_fillvalue(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + filename = 
GetTestParameters(); /* Set the dataset dimension to be one row more than number of processes */ @@ -842,6 +920,19 @@ collective_group_write(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + size = get_size(); chunk_size[0] = (hsize_t)(size / 2); @@ -935,6 +1026,19 @@ independent_group_read(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); H5Pset_all_coll_metadata_ops(plist, false); @@ -1055,6 +1159,19 @@ multiple_group_write(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, group, dataset, or attribute aren't supported with " + "this connector\n"); + fflush(stdout); + } + + return; + } + size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -1210,6 +1327,19 @@ multiple_group_read(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, group, dataset, or attribute aren't supported with " + "this connector\n"); + fflush(stdout); + } + + return; + } + size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -1526,6 +1656,19 @@ io_mode_confusion(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } + /* * Set up file access property list with parallel I/O access */ @@ -1775,6 +1918,20 @@ rr_obj_hdr_flush_confusion(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + /* Make sure the connector supports the API functions being 
tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or " + "file flush aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + assert(mpi_size > 2); is_reader = mpi_rank % 2; @@ -2551,12 +2708,24 @@ chunk_align_bug_1(void) hid_t file_id, dset_id, fapl_id, dcpl_id, space_id; hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE; h5_stat_size_t file_size; - hsize_t align; + hsize_t align = 1; + bool vol_is_native; herr_t ret; const char *filename; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + filename = (const char *)GetTestParameters(); /* Create file without alignment */ @@ -2565,18 +2734,23 @@ chunk_align_bug_1(void) file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); VRFY((file_id >= 0), "H5Fcreate succeeded"); + /* Check if native VOL is being used */ + VRFY((h5_using_native_vol(H5P_DEFAULT, file_id, &vol_is_native) >= 0), "h5_using_native_vol"); + /* Close file */ ret = H5Fclose(file_id); VRFY((ret >= 0), "H5Fclose succeeded"); - /* Get file size */ - file_size = h5_get_file_size(filename, fapl_id); - VRFY((file_size >= 0), "h5_get_file_size succeeded"); + if (vol_is_native) { + /* Get file size */ + file_size = h5_get_file_size(filename, fapl_id); + VRFY((file_size >= 0), "h5_get_file_size succeeded"); - /* Calculate alignment value, set to allow a chunk to squeak in between the - * original EOF and the aligned location of the aggregator. Add some space - * for the dataset metadata */ - align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN; + /* Calculate alignment value, set to allow a chunk to squeak in between the + * original EOF and the aligned location of the aggregator. 
Add some space + * for the dataset metadata */ + align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN; + } /* Set aggregator size and alignment, disable metadata aggregator */ assert(AGGR_SIZE > CHUNK_SIZE); diff --git a/testpar/t_prop.c b/testpar/t_prop.c index de36abfc23e..23710d76fca 100644 --- a/testpar/t_prop.c +++ b/testpar/t_prop.c @@ -52,6 +52,7 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) void *rbuf; MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status); + VRFY((recv_size >= 0), "MPI_Recv succeeded"); buf_size = (size_t)recv_size; rbuf = (uint8_t *)malloc(buf_size); MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status); diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c index b0b5da71fbf..47c78d08d62 100644 --- a/testpar/t_pshutdown.c +++ b/testpar/t_pshutdown.c @@ -52,6 +52,25 @@ main(int argc, char **argv) /* Set up file access property list with parallel I/O access */ fapl = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl >= 0), "H5Pcreate succeeded"); + + /* Get the capability flag of the VOL connector being used */ + ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g); + VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded"); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf( + " API functions for basic file, group, or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + MPI_Finalize(); + return 0; + } + ret = H5Pset_fapl_mpio(fapl, comm, info); VRFY((ret >= 0), ""); diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index 0a3d3d0a49e..4f48f931c93 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -24,6 +24,21 @@ #include "H5Spkg.h" /* Dataspaces */ #include "testphdf5.h" +#ifndef PATH_MAX +#define PATH_MAX 512 +#endif + +/* FILENAME and filenames must have the same number of names. + * Use PARATESTFILE in general and use a separated filename only if the file + * created in one test is accessed by a different test. + * filenames[0] is reserved as the file name for PARATESTFILE. + */ +#define NFILENAME 2 +#define PARATESTFILE filenames[0] +const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL}; +char *filenames[NFILENAME]; +hid_t fapl; /* file access property list */ + /* On Lustre (and perhaps other parallel file systems?), we have severe * slow downs if two or more processes attempt to access the same file system * block. 
To minimize this problem, we set alignment in the shape same tests @@ -1685,7 +1700,8 @@ static void contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size, const int small_rank, const int large_rank, const bool use_collective_io, const hid_t dset_type, int express_test, int *skips_ptr, int max_skips, - int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr) + int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, + int mpi_rank) { #if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG const char *fcnName = "contig_hs_dr_pio_test__run_test()"; @@ -1751,6 +1767,10 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i /* int64_t tests_skipped = */ 0}; struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; + if (MAINPROCESS) + printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), + small_rank, large_rank); + hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io, dset_type, express_test, tv_ptr); @@ -1923,9 +1943,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* contiguous data set, independent I/O */ chunk_edge_size = 0; - contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, - large_rank, false, dset_type, express_test, &skips, - max_skips, &total_tests, &tests_run, &tests_skipped); + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; /* end of case IND_CONTIG */ @@ -1934,9 +1954,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* contiguous data set, collective I/O */ chunk_edge_size = 0; - contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, - large_rank, true, dset_type, express_test, &skips, - max_skips, &total_tests, &tests_run, &tests_skipped); + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; /* end of case COL_CONTIG */ @@ -1945,9 +1965,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* chunked data set, independent I/O */ chunk_edge_size = 5; - contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, - large_rank, false, dset_type, express_test, &skips, - max_skips, &total_tests, &tests_run, &tests_skipped); + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; /* end of case IND_CHUNKED */ @@ -1956,9 +1976,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* chunked data set, collective I/O */ chunk_edge_size = 5; - contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, - large_rank, true, dset_type, express_test, &skips, - max_skips, &total_tests, &tests_run, &tests_skipped); + contig_hs_dr_pio_test__run_test( + test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type, + express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank); test_num++; break; /* end of case COL_CHUNKED */ @@ -1977,9 +1997,13 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) } } - if ((MAINPROCESS) && 
(tests_skipped > 0)) { - fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", - tests_skipped, total_tests); + if (MAINPROCESS) { + if (tests_skipped > 0) { + fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", + tests_skipped, total_tests); + } + else + printf("\n"); } return; @@ -3609,7 +3633,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i const int chunk_edge_size, const int small_rank, const int large_rank, const bool use_collective_io, const hid_t dset_type, const int express_test, int *skips_ptr, int max_skips, int64_t *total_tests_ptr, - int64_t *tests_run_ptr, int64_t *tests_skipped_ptr) + int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank) { #if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG @@ -3676,6 +3700,10 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i /* int64_t tests_skipped = */ 0}; struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; + if (MAINPROCESS) + printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1), + small_rank, large_rank); + hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, use_collective_io, dset_type, express_test, tv_ptr); @@ -3840,7 +3868,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, - &tests_skipped); + &tests_skipped, mpi_rank); test_num++; break; /* end of case IND_CONTIG */ @@ -3848,9 +3876,10 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case COL_CONTIG: /* contiguous data set, collective I/O */ chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test( - test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, true, - dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped); + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, true, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, + &tests_skipped, mpi_rank); test_num++; break; /* end of case COL_CONTIG */ @@ -3861,7 +3890,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, - &tests_skipped); + &tests_skipped, mpi_rank); test_num++; break; /* end of case IND_CHUNKED */ @@ -3869,9 +3898,10 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case COL_CHUNKED: /* chunked data set, collective I/O */ chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test( - test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, true, - dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped); + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, true, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, + &tests_skipped, mpi_rank); test_num++; break; /* end of case COL_CHUNKED */ @@ -3890,9 +3920,13 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) } } - if ((MAINPROCESS) && (tests_skipped > 0)) { - fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", - 
tests_skipped, total_tests); + if (MAINPROCESS) { + if (tests_skipped > 0) { + fprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", + tests_skipped, total_tests); + } + else + printf("\n"); } return; @@ -3905,12 +3939,6 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) * Main driver of the Parallel HDF5 tests */ -#include "testphdf5.h" - -#ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ - /* global variables */ int dim0; int dim1; @@ -3928,17 +3956,6 @@ void *old_client_data; /* previous error handler arg.*/ /* other option flags */ -/* FILENAME and filenames must have the same number of names. - * Use PARATESTFILE in general and use a separated filename only if the file - * created in one test is accessed by a different test. - * filenames[0] is reserved as the file name for PARATESTFILE. - */ -#define NFILENAME 2 -#define PARATESTFILE filenames[0] -const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL}; -char *filenames[NFILENAME]; -hid_t fapl; /* file access property list */ - #ifdef USE_PAUSE /* pause the process for a moment to allow debugger to attach if desired. */ /* Will pause more if greenlight file is not present but will eventually */ @@ -4289,6 +4306,28 @@ main(int argc, char **argv) H5open(); h5_show_hostname(); + fapl = H5Pcreate(H5P_FILE_ACCESS); + + /* Get the capability flag of the VOL connector being used */ + if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) { + if (MAINPROCESS) + printf("Failed to get the capability flag of the VOL connector being used\n"); + + MPI_Finalize(); + return -1; + } + + /* Make sure the connector supports the API functions being tested. This test only + * uses a few API functions, such as H5Fcreate/close/delete, H5Dcreate/write/read/close, + */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) + printf("API functions for basic file and dataset aren't supported with this connector\n"); + + MPI_Finalize(); + return 0; + } + memset(filenames, 0, sizeof(filenames)); for (int i = 0; i < NFILENAME; i++) { if (NULL == (filenames[i] = malloc(PATH_MAX))) { @@ -4316,7 +4355,6 @@ main(int argc, char **argv) TestInfo(argv[0]); /* setup file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* Parse command line arguments */ @@ -4343,6 +4381,8 @@ main(int argc, char **argv) /* Clean up test files */ h5_clean_files(FILENAME, fapl); + H5Pclose(fapl); + nerrors += GetTestNumErrs(); /* Gather errors from all processes */ diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c index e4ff25836d7..b381ef5d77c 100644 --- a/testpar/t_span_tree.c +++ b/testpar/t_span_tree.c @@ -21,7 +21,7 @@ one in collective mode, 2) We will read two datasets with the same hyperslab selection settings, 1. independent read to read independent output, - independent read to read collecive output, + independent read to read collective output, Compare the result, If the result is the same, then collective write succeeds. 2. 
collective read to read independent output, @@ -54,6 +54,22 @@ static void coll_read_test(void); void coll_irregular_cont_write(void) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_write_test(0); } @@ -73,6 +89,22 @@ coll_irregular_cont_write(void) void coll_irregular_cont_read(void) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_read_test(); } @@ -92,6 +124,22 @@ coll_irregular_cont_read(void) void coll_irregular_simple_chunk_write(void) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_write_test(1); } @@ -111,6 +159,22 @@ coll_irregular_simple_chunk_write(void) void coll_irregular_simple_chunk_read(void) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_read_test(); } @@ -130,6 +194,22 @@ coll_irregular_simple_chunk_read(void) void coll_irregular_complex_chunk_write(void) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_write_test(4); } @@ -149,6 +229,22 @@ coll_irregular_complex_chunk_write(void) void coll_irregular_complex_chunk_read(void) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || + !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic 
file dataset, or dataset more aren't supported with this " + "connector\n"); + fflush(stdout); + } + + return; + } coll_read_test(); } @@ -1775,6 +1871,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_col ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0); VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); + /* sync with the other processes before reading data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); + /* read the small data set back to verify that it contains the * expected data. Note that each process reads in the entire * data set and verifies it. @@ -2254,6 +2354,20 @@ lower_dim_size_comp_test(void) /* const char *fcnName = "lower_dim_size_comp_test()"; */ int chunk_edge_size = 0; int use_collective_io; + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) { @@ -2331,6 +2445,17 @@ link_chunk_collective_io_test(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + /* Make sure the connector supports the API functions being tested */ + if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) { + if (MAINPROCESS) { + puts("SKIPPED"); + printf(" API functions for basic file or dataset aren't supported with this connector\n"); + fflush(stdout); + } + + return; + } + assert(mpi_size > 0); /* get the file name */ diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index e094ad6dcd3..57ef5c9bbd4 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -234,7 +234,7 @@ parse_options(int argc, char **argv) nerrors++; return (1); } - if (mpi_rank == 0) { + if (MAINPROCESS) { printf("Test filenames are:\n"); for (i = 0; i < n; i++) printf(" %s\n", filenames[i]); @@ -346,6 +346,15 @@ main(int argc, char **argv) } } + /* Set up file access property list with parallel I/O access */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + + vol_cap_flags_g = H5VL_CAP_FLAG_NONE; + + /* Get the capability flag of the VOL connector being used */ + VRFY((H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) >= 0), "H5Pget_vol_cap_flags succeeded"); + /* Initialize testing framework */ TestInit(argv[0], usage, parse_options); @@ -534,7 +543,6 @@ main(int argc, char **argv) TestInfo(argv[0]); /* setup file access property list */ - fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* Parse command line arguments */ @@ -561,6 +569,8 @@ main(int argc, char **argv) /* Clean up test files */ h5_clean_files(FILENAME, fapl); + H5Pclose(fapl); + nerrors += GetTestNumErrs(); /* Gather errors from all processes */ From 378e99079bd8904e96a9c68b4203becf642fc3de Mon Sep 17 00:00:00 2001 From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Date: Mon, 13 Nov 2023 13:52:31 -0600 Subject: [PATCH 098/101] cmakedefine macro uses C comments with "#undef" (#3845) * Correct CMake macro name * Use ifdef for H5_HAVE_FLOAT128 
like elsewhere --- fortran/src/CMakeLists.txt | 17 ++++++++++++++++- fortran/src/H5_ff.F90 | 2 +- fortran/src/H5config_f.inc.cmake | 28 ++++++++++++++++++++++++---- 3 files changed, 41 insertions(+), 6 deletions(-) diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index d054503f0d6..87557db82ab 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -37,10 +37,25 @@ if (H5_HAVE_PARALLEL) endif () set (CMAKE_H5_HAVE_FLOAT128 0) -if (HAVE_FLOAT128) +if (H5_HAVE_FLOAT128) set (CMAKE_H5_HAVE_FLOAT128 1) endif () +set (CMAKE_H5_FORTRAN_HAVE_STORAGE_SIZE 0) +if (H5_FORTRAN_HAVE_STORAGE_SIZE) + set (CMAKE_H5_FORTRAN_HAVE_STORAGE_SIZE 1) +endif () + +set (CMAKE_H5_FORTRAN_HAVE_SIZEOF 0) +if (H5_FORTRAN_HAVE_SIZEOF) + set (CMAKE_H5_FORTRAN_HAVE_SIZEOF 1) +endif () + +set (CMAKE_H5_FORTRAN_HAVE_C_SIZEOF 0) +if (H5_FORTRAN_HAVE_C_SIZEOF) + set (CMAKE_H5_FORTRAN_HAVE_C_SIZEOF 1) +endif () + configure_file (${HDF5_F90_SRC_SOURCE_DIR}/H5config_f.inc.cmake ${HDF5_F90_BINARY_DIR}/H5config_f.inc @ONLY) configure_file (${HDF5_F90_SRC_SOURCE_DIR}/H5fort_type_defines.h.cmake ${HDF5_F90_BINARY_DIR}/H5fort_type_defines.h @ONLY) diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index 53156731946..05a48ace48c 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -996,7 +996,7 @@ INTEGER(HID_T) FUNCTION h5kind_to_type(ikind, flag) RESULT(h5_type) h5_type = H5T_NATIVE_REAL_C_LONG_DOUBLE #endif #if H5_PAC_FC_MAX_REAL_PRECISION > 28 -#if H5_HAVE_FLOAT128 == 1 +#ifdef H5_HAVE_FLOAT128 ELSE h5_type = H5T_NATIVE_FLOAT_128 #endif diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake index 0f274dbcbd4..665207641ee 100644 --- a/fortran/src/H5config_f.inc.cmake +++ b/fortran/src/H5config_f.inc.cmake @@ -36,13 +36,28 @@ #endif ! Define if the intrinsic function STORAGE_SIZE exists -#cmakedefine H5_FORTRAN_HAVE_STORAGE_SIZE @H5_FORTRAN_HAVE_STORAGE_SIZE@ +#cmakedefine01 CMAKE_H5_FORTRAN_HAVE_STORAGE_SIZE +#if CMAKE_H5_FORTRAN_HAVE_STORAGE_SIZE == 0 +#undef H5_FORTRAN_HAVE_STORAGE_SIZE +#else +#define H5_FORTRAN_HAVE_STORAGE_SIZE +#endif ! Define if the intrinsic function SIZEOF exists -#cmakedefine H5_FORTRAN_HAVE_SIZEOF @H5_FORTRAN_HAVE_SIZEOF@ +#cmakedefine01 CMAKE_H5_FORTRAN_HAVE_SIZEOF +#if CMAKE_H5_FORTRAN_HAVE_SIZEOF == 0 +#undef H5_FORTRAN_HAVE_SIZEOF +#else +#define H5_FORTRAN_HAVE_SIZEOF +#endif ! Define if the intrinsic function C_SIZEOF exists -#cmakedefine H5_FORTRAN_HAVE_C_SIZEOF @H5_FORTRAN_HAVE_C_SIZEOF@ +#cmakedefine01 CMAKE_H5_FORTRAN_HAVE_C_SIZEOF +#if CMAKE_H5_FORTRAN_HAVE_C_SIZEOF == 0 +#undef H5_FORTRAN_HAVE_C_SIZEOF +#else +#define H5_FORTRAN_HAVE_C_SIZEOF +#endif ! Define if the intrinsic function C_LONG_DOUBLE exists #define H5_FORTRAN_HAVE_C_LONG_DOUBLE @H5_FORTRAN_HAVE_C_LONG_DOUBLE@ @@ -63,7 +78,12 @@ #define H5_PAC_FC_MAX_REAL_PRECISION @H5_PAC_FC_MAX_REAL_PRECISION@ ! If C has quad precision -#cmakedefine H5_HAVE_FLOAT128 @H5_HAVE_FLOAT128@ +#cmakedefine01 CMAKE_H5_HAVE_FLOAT128 +#if CMAKE_H5_HAVE_FLOAT128 == 0 +#undef H5_HAVE_FLOAT128 +#else +#define H5_HAVE_FLOAT128 +#endif ! 
Define if INTEGER*16 is available #define H5_HAVE_Fortran_INTEGER_SIZEOF_16 @H5_HAVE_Fortran_INTEGER_SIZEOF_16@ From b5bfea2cc3b84ac91b43537aa92a7ec55a870767 Mon Sep 17 00:00:00 2001 From: mattjala <124107509+mattjala@users.noreply.github.com> Date: Mon, 13 Nov 2023 14:31:24 -0600 Subject: [PATCH 099/101] Make filter unregister callbacks safe for VOL connectors (#3629) * Make filter callbacks use top-level API functions When using VOL connectors, H5I_iterate may not provide valid object pointers to its callback. This change keeps existing functionality in H5Zunregister() without using potentially unsafe pointers. * Filter callbacks use internal API * Skip MPI work on non-native VOL --- src/H5Z.c | 142 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 98 insertions(+), 44 deletions(-) diff --git a/src/H5Z.c b/src/H5Z.c index b514f628887..720aa439c36 100644 --- a/src/H5Z.c +++ b/src/H5Z.c @@ -485,19 +485,30 @@ H5Z__check_unregister(hid_t ocpl_id, H5Z_filter_t filter_id) *------------------------------------------------------------------------- */ static int -H5Z__check_unregister_group_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void *key) +H5Z__check_unregister_group_cb(void H5_ATTR_UNUSED *obj_ptr, hid_t obj_id, void *key) { - hid_t ocpl_id = -1; - H5Z_object_t *object = (H5Z_object_t *)key; - htri_t filter_in_pline = false; - int ret_value = false; /* Return value */ + hid_t ocpl_id = -1; + H5Z_object_t *object = (H5Z_object_t *)key; + H5VL_object_t *vol_obj; /* Object for loc_id */ + H5VL_group_get_args_t vol_cb_args; /* Arguments to VOL callback */ + htri_t filter_in_pline = false; + int ret_value = false; /* Return value */ FUNC_ENTER_PACKAGE - assert(obj_ptr); - /* Get the group creation property */ - if ((ocpl_id = H5G_get_create_plist((H5G_t *)obj_ptr)) < 0) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(obj_id, H5I_GROUP))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid group identifier"); + + /* Set up VOL callback arguments */ + vol_cb_args.op_type = H5VL_GROUP_GET_GCPL; + vol_cb_args.args.get_gcpl.gcpl_id = H5I_INVALID_HID; + + /* Get the group creation property list */ + if (H5VL_group_get(vol_obj, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, H5I_INVALID_HID, "unable to get group creation properties"); + + if ((ocpl_id = vol_cb_args.args.get_gcpl.gcpl_id) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, FAIL, "can't get group creation property list"); /* Check if the filter is in the group creation property list */ @@ -535,19 +546,30 @@ H5Z__check_unregister_group_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void *------------------------------------------------------------------------- */ static int -H5Z__check_unregister_dset_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void *key) +H5Z__check_unregister_dset_cb(void H5_ATTR_UNUSED *obj_ptr, hid_t obj_id, void *key) { - hid_t ocpl_id = -1; - H5Z_object_t *object = (H5Z_object_t *)key; - htri_t filter_in_pline = false; - int ret_value = false; /* Return value */ + hid_t ocpl_id = -1; + H5Z_object_t *object = (H5Z_object_t *)key; + H5VL_object_t *vol_obj; /* Object for loc_id */ + H5VL_dataset_get_args_t vol_cb_args; /* Arguments to VOL callback */ + htri_t filter_in_pline = false; + int ret_value = false; /* Return value */ FUNC_ENTER_PACKAGE - assert(obj_ptr); - /* Get the dataset creation property */ - if ((ocpl_id = H5D_get_create_plist((H5D_t *)obj_ptr)) < 0) + if (NULL == (vol_obj = (H5VL_object_t 
*)H5I_object_verify(obj_id, H5I_DATASET))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid dataset identifier"); + + /* Set up VOL callback arguments */ + vol_cb_args.op_type = H5VL_DATASET_GET_DCPL; + vol_cb_args.args.get_dcpl.dcpl_id = H5I_INVALID_HID; + + /* Get the dataset creation property list */ + if (H5VL_dataset_get(vol_obj, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, H5I_INVALID_HID, "unable to get dataset creation properties"); + + if ((ocpl_id = vol_cb_args.args.get_dcpl.dcpl_id) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, FAIL, "can't get dataset creation property list"); /* Check if the filter is in the dataset creation property list */ @@ -581,51 +603,83 @@ H5Z__check_unregister_dset_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void * *------------------------------------------------------------------------- */ static int -H5Z__flush_file_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void H5_ATTR_PARALLEL_USED *key) +H5Z__flush_file_cb(void H5_ATTR_UNUSED *obj_ptr, hid_t obj_id, void H5_ATTR_PARALLEL_USED *key) { - H5F_t *f = (H5F_t *)obj_ptr; /* File object for operations */ + #ifdef H5_HAVE_PARALLEL H5Z_object_t *object = (H5Z_object_t *)key; -#endif /* H5_HAVE_PARALLEL */ - int ret_value = false; /* Return value */ +#endif /* H5_HAVE_PARALLEL */ + int ret_value = false; /* Return value */ + H5VL_file_specific_args_t vol_cb_args_specific; /* Arguments to VOL callback */ + H5VL_object_t *vol_obj; /* File for file_id */ + H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ + bool is_native_vol_obj = true; + unsigned int intent = 0; FUNC_ENTER_PACKAGE /* Sanity checks */ - assert(obj_ptr); assert(key); + /* Get the internal file structure */ + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(obj_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); + + /* Get intent */ + vol_cb_args.op_type = H5VL_FILE_GET_INTENT; + vol_cb_args.args.get_intent.flags = &intent; + + /* Get the flags */ + if (H5VL_file_get(vol_obj, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file's intent flags"); + + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5I_INVALID_HID, + "can't determine if VOL object is native connector object"); + /* Do a global flush if the file is opened for write */ - if (H5F_ACC_RDWR & H5F_INTENT(f)) { + if (H5F_ACC_RDWR & intent) { #ifdef H5_HAVE_PARALLEL - /* Check if MPIO driver is used */ - if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { - /* Sanity check for collectively calling H5Zunregister, if requested */ - /* (Sanity check assumes that a barrier on one file's comm - * is sufficient (i.e. that there aren't different comms for - * different files). -QAK, 2018/02/14) - */ - if (H5_coll_api_sanity_check_g && !object->sanity_checked) { - MPI_Comm mpi_comm; /* File's communicator */ + /* Checking MPI flag requires native VOL */ + if (is_native_vol_obj) { + H5F_t *f = (H5F_t *)obj_ptr; /* File object for native VOL operation */ + + /* Check if MPIO driver is used */ + if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { - /* Retrieve the file communicator */ - if (MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(f))) - HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, FAIL, "can't get MPI communicator"); + /* Sanity check for collectively calling H5Zunregister, if requested */ + /* (Sanity check assumes that a barrier on one file's comm + * is sufficient (i.e. 
that there aren't different comms for + * different files). -QAK, 2018/02/14) + */ + if (H5_coll_api_sanity_check_g && !object->sanity_checked) { + MPI_Comm mpi_comm; /* File's communicator */ - /* Issue the barrier */ - if (mpi_comm != MPI_COMM_NULL) - MPI_Barrier(mpi_comm); + /* Retrieve the file communicator */ + if (H5F_mpi_retrieve_comm(obj_id, H5P_DEFAULT, &mpi_comm) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, FAIL, "can't get MPI communicator"); - /* Set the "sanity checked" flag */ - object->sanity_checked = true; - } /* end if */ - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ + /* Issue the barrier */ + if (mpi_comm != MPI_COMM_NULL) + MPI_Barrier(mpi_comm); + + /* Set the "sanity checked" flag */ + object->sanity_checked = true; + } /* end if */ + } /* end if */ + } +#endif /* H5_HAVE_PARALLEL */ /* Call the flush routine for mounted file hierarchies */ - if (H5F_flush_mounts((H5F_t *)obj_ptr) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANTFLUSH, FAIL, "unable to flush file hierarchy"); + vol_cb_args_specific.op_type = H5VL_FILE_FLUSH; + vol_cb_args_specific.args.flush.obj_type = H5I_FILE; + vol_cb_args_specific.args.flush.scope = H5F_SCOPE_GLOBAL; + + /* Flush the object */ + if (H5VL_file_specific(vol_obj, &vol_cb_args_specific, H5P_DATASET_XFER_DEFAULT, NULL) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file hierarchy"); + } /* end if */ done: From 2512bdd7a984eadebc50812e038f08afd723ba02 Mon Sep 17 00:00:00 2001 From: "H. Joe Lee" Date: Tue, 14 Nov 2023 09:58:20 -0600 Subject: [PATCH 100/101] Add extra space in comments for consistency (#3852) * Add extra space in comments for consistency * uncomment tfloatsattrs test --- tools/test/h5dump/CMakeTests.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake index 7ed021fce1c..8cc9a5260ab 100644 --- a/tools/test/h5dump/CMakeTests.cmake +++ b/tools/test/h5dump/CMakeTests.cmake @@ -1027,7 +1027,7 @@ ADD_H5_TEST (tbitnopaque_le 0 --enable-error-stack tbitnopaque.h5) endif () - #test for the nested compound type + # test for the nested compound type ADD_H5_TEST (tnestcomp-1 0 --enable-error-stack tnestedcomp.h5) ADD_H5_TEST (tnestedcmpddt 0 --enable-error-stack tnestedcmpddt.h5) @@ -1056,7 +1056,7 @@ ADD_H5_TEST (tvldtypes4 0 --enable-error-stack tvldtypes4.h5) ADD_H5_TEST (tvldtypes5 0 --enable-error-stack tvldtypes5.h5) - #test for file with variable length string data + # test for file with variable length string data ADD_H5_TEST (tvlstr 0 --enable-error-stack tvlstr.h5) ADD_H5_TEST (tvlenstr_array 0 --enable-error-stack tvlenstr_array.h5) From f39b228a4cda701c363d4c32dced4065dd86dc3e Mon Sep 17 00:00:00 2001 From: jhendersonHDF Date: Tue, 14 Nov 2023 10:26:36 -0600 Subject: [PATCH 101/101] Update Actions badges to link to relevant workflow (#3850) --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 28d8d7a1b4d..45d6bed8e97 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,12 @@ HDF5 version 1.15.0 currently under development ![HDF5 Logo](doxygen/img/HDF5.png) -[![develop build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=develop&label=develop)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) -[![HDF-EOS5 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/hdfeos5.yml?branch=develop&label=HDF-EOS5)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) 
-[![netCDF build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/netcdf.yml?branch=develop&label=netCDF)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) -[![h5py build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/h5py.yml?branch=develop&label=h5py)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) -[![CVE regression](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cve.yml?branch=develop&label=CVE)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Adevelop) -[![1.14 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_14&label=1.14)](https://github.com/HDFGroup/hdf5/actions?query=branch%3Ahdf5_1_14) +[![develop build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=develop&label=develop)](https://github.com/HDFGroup/hdf5/actions/workflows/main.yml?query=branch%3Adevelop) +[![HDF-EOS5 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/hdfeos5.yml?branch=develop&label=HDF-EOS5)](https://github.com/HDFGroup/hdf5/actions/workflows/hdfeos5.yml?query=branch%3Adevelop) +[![netCDF build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/netcdf.yml?branch=develop&label=netCDF)](https://github.com/HDFGroup/hdf5/actions/workflows/netcdf.yml?query=branch%3Adevelop) +[![h5py build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/h5py.yml?branch=develop&label=h5py)](https://github.com/HDFGroup/hdf5/actions/workflows/h5py.yml?query=branch%3Adevelop) +[![CVE regression](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cve.yml?branch=develop&label=CVE)](https://github.com/HDFGroup/hdf5/actions/workflows/cve.yml?query=branch%3Adevelop) +[![1.14 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/main.yml?branch=hdf5_1_14&label=1.14)](https://github.com/HDFGroup/hdf5/actions/workflows/main.yml?query=branch%3Ahdf5_1_14) [![BSD](https://img.shields.io/badge/License-BSD-blue.svg)](https://github.com/HDFGroup/hdf5/blob/develop/COPYING) [HPC configure/build/test results](https://my.cdash.org/index.php?project=HDF5)
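
The capability-flag checks added to the parallel tests earlier in this series all follow one pattern: create a file-access property list, query the active VOL connector's capability flags with H5Pget_vol_cap_flags(), and skip any test whose required API class (basic file, basic dataset, etc.) the connector does not advertise. A minimal standalone sketch of that pattern follows; it is a hypothetical test skeleton, not code taken from these patches, and everything apart from the public HDF5 calls and flag names is illustrative only.

    #include "hdf5.h"
    #include <stdio.h>

    int main(void)
    {
        hid_t    fapl  = H5Pcreate(H5P_FILE_ACCESS);
        uint64_t flags = H5VL_CAP_FLAG_NONE;

        /* Query the capability flags of the VOL connector set on this fapl */
        if (fapl < 0 || H5Pget_vol_cap_flags(fapl, &flags) < 0)
            return 1;

        /* Skip the tests if the connector lacks basic file/dataset support */
        if (!(flags & H5VL_CAP_FLAG_FILE_BASIC) || !(flags & H5VL_CAP_FLAG_DATASET_BASIC)) {
            puts("SKIPPED: basic file/dataset API functions not supported by this VOL connector");
            H5Pclose(fapl);
            return 0;
        }

        /* ... run the file/dataset tests here ... */

        H5Pclose(fapl);
        return 0;
    }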