sandbox/acastillo/output_fields/vtkhdf/output_vtkhdf_helpers.h
- Helper functions for output_vtkhdf.h
- Count points and cells in each subdomain and in total
- Calculate offsets for points and cells in each subdomain
- Initialize a marker to rebuild the topology
- Populate points_dset based on markers and dimensions
- Populate types_dset
- Populate scalar_dset using the scalar s
- Populate vector_dset using the vector v
- Populate offsets_dset
- Populate topo_dset based on markers and dimensions
- Write Dataset
Helper functions for output_vtkhdf.h
Count points and cells in each subdomain and total
void count_points_and_cells(int *num_points_glob, int *num_cells_glob, int *num_points, int *num_cells, scalar per_mask) {
  foreach_vertex(serial, noauto){
    (*num_points)++;
  }
  foreach (serial, noauto){
    if (per_mask[]){
      (*num_cells)++;
    }
  }
  MPI_Allreduce(num_points, num_points_glob, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(num_cells, num_cells_glob, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
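The counters are only incremented, never reset, so the caller is expected to zero them first. A minimal usage sketch (the variable names here are illustrative, not part of this file):

scalar per_mask[];
foreach()
  per_mask[] = 1.;                 // keep every cell
int num_points = 0, num_cells = 0; // must start at zero
int num_points_glob = 0, num_cells_glob = 0;
count_points_and_cells(&num_points_glob, &num_cells_glob, &num_points, &num_cells, per_mask);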
void count_points_and_cells_slice(int *num_points_glob, int *num_cells_glob, int *num_points, int *num_cells, scalar per_mask, coord n = {0, 0, 1}, double _alpha = 0) {
  foreach_vertex(serial, noauto){
    shortcut_slice(n, _alpha);
    (*num_points)++;
  }
  foreach (serial, noauto){
    if (per_mask[]){
      (*num_cells)++;
    }
  }
  MPI_Allreduce(num_points, num_points_glob, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(num_cells, num_cells_glob, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
Calculate offsets for points and cells in each subdomain
void calculate_offsets(int *offset_points, int *offset_cells, int num_points, int num_cells, hsize_t *offset) {
  // Arrays to store the number of points and cells in each subdomain
  int list_points[npe()];
  int list_cells[npe()];
  // Initialize the arrays to zero
  for (int i = 0; i < npe(); ++i){
    list_points[i] = 0;
    list_cells[i] = 0;
  }
  // Set the number of points and cells for the current subdomain
  list_points[pid()] = num_points;
  list_cells[pid()] = num_cells;

  // Perform an all-reduce operation to gather the number of points and cells from all subdomains
  MPI_Allreduce(list_points, offset_points, npe(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(list_cells, offset_cells, npe(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  // Calculate the offset for the points in the current subdomain
  offset[0] = 0;
  if (pid() != 0){
    // Sum the counts of the previous subdomains to get the starting offset for the current subdomain
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_points[i - 1];
    }
  }
}
void calculate_offsets2(int *offset_points, int num_points, hsize_t *offset) {
  // Array to store the number of points in each subdomain
  int list_points[npe()];
  // Initialize the array to zero
  for (int i = 0; i < npe(); ++i){
    list_points[i] = 0;
  }
  // Set the number of points for the current subdomain
  list_points[pid()] = num_points;

  // Perform an all-reduce operation to gather the number of points from all subdomains
  MPI_Allreduce(list_points, offset_points, npe(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  // Calculate the offset for the points in the current subdomain
  offset[0] = 0;
  if (pid() != 0){
    // Sum the counts of the previous subdomains to get the starting offset for the current subdomain
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_points[i - 1];
    }
  }
}
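As a worked example (hypothetical values, not from this file): with four ranks holding 10, 20, 30 and 40 local points, every rank ends up with offset_points = {10, 20, 30, 40} after the all-reduce, and rank 2 computes offset[0] = 10 + 20 = 30, i.e. the exclusive prefix sum of the lower ranks' counts:

int offset_points[npe()];
hsize_t offset[2];
calculate_offsets2(offset_points, num_points, offset);
// offset[0] is now the global index of this rank's first point,
// e.g. {0, 10, 30, 60} on ranks 0..3 for local counts {10, 20, 30, 40}.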
Initialize marker to rebuild the topology
void initialize_marker(vertex scalar marker, hsize_t *offset, hsize_t accumulate = 1) {
  int num_points = 0;
  foreach_vertex(serial, noauto){
#if !TREE
#if dimension == 2
    int _k = (point.i - 2) * ((1 << point.level) + 1) + (point.j - 2);
#else
    int _k = (point.i - 2) * sq((1 << point.level) + 1) + (point.j - 2) * ((1 << point.level) + 1) + (point.k - 2);
#endif
#else // TREE
    int _k = num_points;
#endif
    marker[] = _k + offset[0]*accumulate;
    num_points++;
  }
  marker.dirty = true;
}
void initialize_marker_slice(vertex scalar marker, hsize_t *offset, coord n = {0, 0, 1}, double _alpha = 0, hsize_t accumulate = 1) {
  int num_points = 0;
  foreach_vertex(serial, noauto){
    marker[] = 0.;
    shortcut_slice(n, _alpha);
    marker[] = num_points + offset[0]*accumulate;
    num_points++;
  }
}
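In both variants the marker stamps each vertex with its global (file-wide) point index: the local counter plus this rank's point offset. A sketch of the two-step pattern (names illustrative):

hsize_t offset[2];
calculate_offsets2(offset_points, num_points, offset); // global start of this rank's points
vertex scalar marker[];
initialize_marker(marker, offset); // marker[] = local vertex index + offset[0]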
Populate points_dset based on markers and dimensions
void populate_points_dset(double **points_dset, int num_points, int *offset_points, hsize_t *count, hsize_t *offset) {
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_points;
  count[1] = 3;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_points[i - 1];
    }
  }
  // Allocate memory for points_dset
  *points_dset = (double *)malloc(count[0] * count[1] * sizeof(double));
  // Iterate over each vertex
  foreach_vertex(serial, noauto){
#if !TREE
#if dimension == 2
    int _k = (point.i - 2) * ((1 << point.level) + 1) + (point.j - 2);
#else
    int _k = (point.i - 2) * sq((1 << point.level) + 1) + (point.j - 2) * ((1 << point.level) + 1) + (point.k - 2);
#endif
#endif
    // Calculate starting index
    int ii = _k * 3;
    // Store coordinates
    (*points_dset)[ii + 0] = x;
    (*points_dset)[ii + 1] = y;
#if dimension == 2
    (*points_dset)[ii + 2] = 0.;
#else
    (*points_dset)[ii + 2] = z;
#endif
  }
}
void populate_points_dset_slice(double **points_dset, int num_points, int *offset_points, hsize_t *count,
                                hsize_t *offset, coord n = {0, 0, 1}, double _alpha = 0)
{
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_points;
  count[1] = 3;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_points[i - 1];
    }
  }
  // Allocate memory for points_dset
  *points_dset = (double *)malloc(count[0] * count[1] * sizeof(double));
  // Iterate over each vertex
  num_points = 0;
  foreach_vertex(serial, noauto){
    shortcut_slice(n, _alpha);

    // Calculate starting index
    int ii = num_points * 3;
    // Store coordinates
    (*points_dset)[ii + 0] = x;
    (*points_dset)[ii + 1] = y;
    (*points_dset)[ii + 2] = z;
    num_points++;
  }
}
Populate types_dset
void populate_types_dset(char **types_dset, char type, int num_cells, int *offset_cells, hsize_t *count, hsize_t *offset) {
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = 1;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  // Allocate memory for types_dset
  *types_dset = (char *)malloc(count[0] * count[1] * sizeof(char));
  for (int i = 0; i < num_cells; ++i){
    (*types_dset)[i] = type;
  }
}
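Every cell gets the same VTK cell type. In the standard VTK numbering a quadrilateral is type 9 and a hexahedron is type 12, so a plausible call looks like this (the literals are VTK's, not defined in this file):

char *types_dset = NULL;
populate_types_dset(&types_dset, dimension == 2 ? 9 : 12, // VTK_QUAD : VTK_HEXAHEDRON
                    num_cells, offset_cells, count, offset);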
Populate scalar_dset using the scalar s
void populate_scalar_dset(scalar s, double *scalar_dset, int num_cells, int *offset_cells, hsize_t *count, hsize_t *offset, scalar per_mask) {
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = 1;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  foreach (serial, noauto){
    if (per_mask[]){
#if !TREE
#if dimension == 2
      int _k = (point.i - 2) * ((1 << point.level)) + (point.j - 2);
#else
      int _k = (point.i - 2) * sq((1 << point.level)) + (point.j - 2) * ((1 << point.level)) + (point.k - 2);
#endif
#endif
      // Store values
      scalar_dset[_k] = s[];
    }
  }
}
void populate_scalar_dset_slice(scalar s, double *scalar_dset, int num_cells, int *offset_cells, hsize_t *count,
                                hsize_t *offset, scalar per_mask, coord n = {0, 0, 1}, double _alpha = 0)
{
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = 1;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  num_cells = 0;
  foreach (serial, noauto){
    if (per_mask[]){
      if (n.x == 1)
        scalar_dset[num_cells] = 0.5 * (val(s) + val(s, 1, 0, 0));
      else if (n.y == 1)
        scalar_dset[num_cells] = 0.5 * (val(s) + val(s, 0, 1, 0));
      else
        scalar_dset[num_cells] = 0.5 * (val(s) + val(s, 0, 0, 1));
      num_cells++;
    }
  }
}
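The slice variants sample s on the cut plane as the average of the two cells that straddle it; for a normal $n = (0, 0, 1)$ this is

$$s_{\mathrm{slice}} = \tfrac{1}{2}\left(s_{i,j,k} + s_{i,j,k+1}\right),$$

which is what 0.5 * (val(s) + val(s, 0, 0, 1)) computes above.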
Populate vector_dset using the vector v
void populate_vector_dset(vector v, double *vector_dset, int num_cells, int *offset_cells, hsize_t *count, hsize_t *offset, scalar per_mask) {
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = 3;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  foreach (serial, noauto){
    if (per_mask[]){
#if !TREE
#if dimension == 2
      int _k = (point.i - 2) * ((1 << point.level)) + (point.j - 2);
#else
      int _k = (point.i - 2) * sq((1 << point.level)) + (point.j - 2) * ((1 << point.level)) + (point.k - 2);
#endif
#endif
      // Calculate starting index
      int ii = _k * 3;
      // Store each component
      vector_dset[ii + 0] = v.x[];
      vector_dset[ii + 1] = v.y[];
#if dimension == 2
      vector_dset[ii + 2] = 0.;
#else
      vector_dset[ii + 2] = v.z[];
#endif
    }
  }
}
#if dimension == 3
void populate_vector_dset_slice(vector v, double *vector_dset, int num_cells, int *offset_cells, hsize_t *count,
                                hsize_t *offset, scalar per_mask, coord n = {0, 0, 1}, double _alpha = 0){
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = 3;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  num_cells = 0;
  foreach (serial, noauto){
    if (per_mask[]){
      int ii = num_cells * 3;
      if (n.x == 1){
        vector_dset[ii + 0] = 0.5 * (val(v.x) + val(v.x, 1, 0, 0));
        vector_dset[ii + 1] = 0.5 * (val(v.y) + val(v.y, 1, 0, 0));
        vector_dset[ii + 2] = 0.5 * (val(v.z) + val(v.z, 1, 0, 0));
      }
      else if (n.y == 1){
        vector_dset[ii + 0] = 0.5 * (val(v.x) + val(v.x, 0, 1, 0));
        vector_dset[ii + 1] = 0.5 * (val(v.y) + val(v.y, 0, 1, 0));
        vector_dset[ii + 2] = 0.5 * (val(v.z) + val(v.z, 0, 1, 0));
      }
      else{
        vector_dset[ii + 0] = 0.5 * (val(v.x) + val(v.x, 0, 0, 1));
        vector_dset[ii + 1] = 0.5 * (val(v.y) + val(v.y, 0, 0, 1));
        vector_dset[ii + 2] = 0.5 * (val(v.z) + val(v.z, 0, 0, 1));
      }
      num_cells++;
    }
  }
}
#endif
Populate offsets_dset
void populate_offsets_dset(long **offsets_dset, char noffset, int num_cells, int *offset_cells, hsize_t *count, hsize_t *offset) {
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = 1;
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  // Allocate memory for offsets_dset
  *offsets_dset = (long *)malloc(count[0] * count[1] * sizeof(long));
  for (int i = 0; i < num_cells; ++i){
    (*offsets_dset)[i] = (long)i * (long)noffset;
  }
}
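For 2D quads (noffset = 4) this yields 0, 4, 8, ...: entry i is the position where cell i's connectivity starts. A hedged usage sketch, with 1 << dimension matching the 2^dimension corners used by populate_topo_dset() below:

long *offsets_dset = NULL;
populate_offsets_dset(&offsets_dset, 1 << dimension, num_cells, offset_cells, count, offset);
// offsets_dset = {0, 4, 8, ...} in 2D: cell i owns connectivity entries
// [offsets_dset[i], offsets_dset[i] + 4).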
Populate topo_dset based on markers and dimensions
void populate_topo_dset(long **topo_dset, int num_cells, int *offset_cells, hsize_t *count, hsize_t *offset, scalar per_mask, vertex scalar marker) {
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = pow(2, dimension);
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  // Allocate memory for topo_dset
  *topo_dset = (long *)malloc(count[0] * count[1] * sizeof(long));
  // Iterate over each cell
  foreach (serial, noauto){
    if (per_mask[]){
      // _k exists by default on quad/octrees, but not on multigrid
#if !TREE
#if dimension == 2
      // Calculate index for 2D
      int _k = (point.i - 2) * ((1 << point.level)) + (point.j - 2);
#else
      // Calculate index for 3D
      int _k = (point.i - 2) * sq((1 << point.level)) + (point.j - 2) * ((1 << point.level)) + (point.k - 2);
#endif
#endif
      // Calculate starting index for topo_dset
      int ii = _k * count[1];
      // Assign marker values to topo_dset
      (*topo_dset)[ii + 0] = (long)marker[];
      (*topo_dset)[ii + 1] = (long)marker[1, 0];
      (*topo_dset)[ii + 2] = (long)marker[1, 1];
      (*topo_dset)[ii + 3] = (long)marker[0, 1];
#if dimension == 3
      // Additional assignments for 3D
      (*topo_dset)[ii + 4] = (long)marker[0, 0, 1];
      (*topo_dset)[ii + 5] = (long)marker[1, 0, 1];
      (*topo_dset)[ii + 6] = (long)marker[1, 1, 1];
      (*topo_dset)[ii + 7] = (long)marker[0, 1, 1];
#endif
    }
  }
  count[0] = num_cells*pow(2, dimension);
  count[1] = 1;
}
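The connectivity is rebuilt from the global vertex numbers written by initialize_marker(), so a typical sequence might be (a sketch; names illustrative, and offset must already hold this rank's point offset):

vertex scalar marker[];
initialize_marker(marker, offset);
long *topo_dset = NULL;
populate_topo_dset(&topo_dset, num_cells, offset_cells, count, offset, per_mask, marker);
// In 2D the corners are listed counter-clockwise, matching VTK_QUAD ordering.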
void populate_topo_dset_slice(long **topo_dset, int num_cells, int *offset_cells, hsize_t *count,
                              hsize_t *offset, scalar per_mask, vertex scalar marker, coord n = {0, 0, 1}, double _alpha = 0)
{
  // Each process defines a dataset in memory and writes to a hyperslab
  count[0] = num_cells;
  count[1] = pow(2, dimension - 1);
  offset[0] = 0;
  offset[1] = 0;
  if (pid() != 0){
    for (int i = 1; i <= pid(); ++i){
      offset[0] += offset_cells[i - 1];
    }
  }
  // Allocate memory for topo_dset
  *topo_dset = (long *)malloc(count[0] * count[1] * sizeof(long));
  // Iterate over each cell
  num_cells = 0;
  foreach (serial, noauto){
    if (per_mask[]){
      // Calculate index
      int ii = num_cells * count[1];
      if (n.x == 1){
        (*topo_dset)[ii + 0] = (long)marker[1, 0, 0];
        (*topo_dset)[ii + 1] = (long)marker[1, 1, 0];
        (*topo_dset)[ii + 2] = (long)marker[1, 1, 1];
        (*topo_dset)[ii + 3] = (long)marker[1, 0, 1];
      }
      else if (n.y == 1){
        (*topo_dset)[ii + 0] = (long)marker[0, 1, 0];
        (*topo_dset)[ii + 1] = (long)marker[1, 1, 0];
        (*topo_dset)[ii + 2] = (long)marker[1, 1, 1];
        (*topo_dset)[ii + 3] = (long)marker[0, 1, 1];
      }
      else{
        (*topo_dset)[ii + 0] = (long)marker[0, 0, 1];
        (*topo_dset)[ii + 1] = (long)marker[1, 0, 1];
        (*topo_dset)[ii + 2] = (long)marker[1, 1, 1];
        (*topo_dset)[ii + 3] = (long)marker[0, 1, 1];
      }
      num_cells++;
    }
  }
  count[0] = num_cells*pow(2, dimension - 1);
  count[1] = 1;
}
Write Dataset
herr_t create_attribute_type(hid_t group_id, const char *attrname_type, const char *attrvalue_type, size_t str_size) {
  hid_t space_id, strtype, attr_id;
  herr_t status;

  // Create a scalar dataspace
  space_id = H5Screate(H5S_SCALAR);
  if (space_id < 0) {
    fprintf(stderr, "Failed to create scalar dataspace\n");
    return -1;
  }

  // Copy the string datatype and set its properties
  strtype = H5Tcopy(H5T_C_S1);
  if (strtype < 0) {
    fprintf(stderr, "Failed to copy string datatype\n");
    H5Sclose(space_id);
    return -1;
  }
  status = H5Tset_size(strtype, str_size);
  if (status < 0) {
    fprintf(stderr, "Failed to set string size\n");
    H5Tclose(strtype);
    H5Sclose(space_id);
    return -1;
  }
  status = H5Tset_strpad(strtype, H5T_STR_NULLTERM);
  if (status < 0) {
    fprintf(stderr, "Failed to set string padding\n");
    H5Tclose(strtype);
    H5Sclose(space_id);
    return -1;
  }
  status = H5Tset_cset(strtype, H5T_CSET_ASCII);
  if (status < 0) {
    fprintf(stderr, "Failed to set character set\n");
    H5Tclose(strtype);
    H5Sclose(space_id);
    return -1;
  }

  // Create the attribute
  attr_id = H5Acreate2(group_id, attrname_type, strtype, space_id, H5P_DEFAULT, H5P_DEFAULT);
  if (attr_id < 0) {
    fprintf(stderr, "Failed to create attribute\n");
    H5Tclose(strtype);
    H5Sclose(space_id);
    return -1;
  }

  // Write the attribute value
  status = H5Awrite(attr_id, strtype, attrvalue_type);
  if (status < 0) {
    fprintf(stderr, "Failed to write attribute value\n");
    H5Aclose(attr_id);
    H5Tclose(strtype);
    H5Sclose(space_id);
    return -1;
  }

  // Close the attribute
  status = H5Aclose(attr_id);
  if (status < 0) {
    fprintf(stderr, "Failed to close attribute\n");
    H5Tclose(strtype);
    H5Sclose(space_id);
    return -1;
  }

  // Close the datatype
  status = H5Tclose(strtype);
  if (status < 0) {
    fprintf(stderr, "Failed to close string datatype\n");
    H5Sclose(space_id);
    return -1;
  }

  // Close the dataspace
  status = H5Sclose(space_id);
  if (status < 0) {
    fprintf(stderr, "Failed to close scalar dataspace\n");
    return -1;
  }

  return 0;
}
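A hedged usage sketch: in the VTKHDF layout the root group carries a string attribute "Type" whose value is the grid type (the group and attribute names follow the VTKHDF specification; error handling omitted):

hid_t group_id = H5Gcreate(file_id, "/VTKHDF", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
create_attribute_type(group_id, "Type", "UnstructuredGrid",
                      strlen("UnstructuredGrid") + 1); // + 1: the datatype is null-terminated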
// Function to create and write an attribute to an HDF5 group
herr_t create_attribute(hid_t group_id, const char *attrname_version, const int *version_data, const hsize_t *dims) {
  hid_t space_id, attr_id;
  herr_t status;

  // Create a simple dataspace
  space_id = H5Screate_simple(1, dims, NULL);
  if (space_id < 0) {
    fprintf(stderr, "Failed to create simple dataspace\n");
    return -1;
  }

  // Create the attribute
  attr_id = H5Acreate2(group_id, attrname_version, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT);
  if (attr_id < 0) {
    fprintf(stderr, "Failed to create attribute\n");
    H5Sclose(space_id);
    return -1;
  }

  // Write the attribute value
  status = H5Awrite(attr_id, H5T_NATIVE_INT, version_data);
  if (status < 0) {
    fprintf(stderr, "Failed to write attribute value\n");
    H5Aclose(attr_id);
    H5Sclose(space_id);
    return -1;
  }

  // Close the attribute
  status = H5Aclose(attr_id);
  if (status < 0) {
    fprintf(stderr, "Failed to close attribute\n");
    H5Sclose(space_id);
    return -1;
  }

  // Close the dataspace
  status = H5Sclose(space_id);
  if (status < 0) {
    fprintf(stderr, "Failed to close simple dataspace\n");
    return -1;
  }

  return 0;
}
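For example, the VTKHDF group also carries an integer "Version" attribute; a sketch (the value {1, 0} is illustrative):

int version[2] = {1, 0};
hsize_t dims = 2;
create_attribute(group_id, "Version", version, &dims);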
// Function to create and write a simple dataset to an HDF5 group
herr_t write_simple_dataset(hid_t group_id, const char *dataset_name, const int *data, const hsize_t *dims) {
  hid_t space_id, dataset_id;
  herr_t status;

  // Create a simple dataspace
  space_id = H5Screate_simple(1, dims, NULL);
  if (space_id < 0) {
    fprintf(stderr, "Failed to create simple dataspace\n");
    return -1;
  }

  // Create the dataset (stored as long; HDF5 converts the int buffer on write)
  dataset_id = H5Dcreate(group_id, dataset_name, H5T_NATIVE_LONG, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  if (dataset_id < 0) {
    fprintf(stderr, "Failed to create dataset\n");
    H5Sclose(space_id);
    return -1;
  }

  // Write the dataset value
  status = H5Dwrite(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
  if (status < 0) {
    fprintf(stderr, "Failed to write dataset value\n");
    H5Dclose(dataset_id);
    H5Sclose(space_id);
    return -1;
  }

  // Close the dataset
  status = H5Dclose(dataset_id);
  if (status < 0) {
    fprintf(stderr, "Failed to close dataset\n");
    H5Sclose(space_id);
    return -1;
  }

  // Close the dataspace
  status = H5Sclose(space_id);
  if (status < 0) {
    fprintf(stderr, "Failed to close simple dataspace\n");
    return -1;
  }

  return 0;
}
create_chunked_dataset(): creates a chunked dataset in an HDF5 file.

The arguments and their default values are (a usage sketch follows the function definition below):

- file_id: HDF5 file identifier
- count: size of the dataset to create
- offset: starting position for dataset creation
- dataset_name: name of the dataset to create
- num_cells: total number of cells in the dataset
- num_cells_loc: number of cells to write in this call
- num_dims: number of dimensions in the dataset
- topo_dset: pointer to the data to write to the dataset
- datatype: datatype of the data to write
- chunk_size: size of the chunks in which the dataset is stored (default: num_cells_loc)
- compression_level: compression level (default: 9)
void create_chunked_dataset(hid_t file_id, hsize_t *count, hsize_t *offset, const char *dataset_name,
                            int num_cells, int num_cells_loc, int num_dims, const void *topo_dset,
                            hid_t datatype, int chunk_size = num_cells_loc, int compression_level = 9)
{
  hid_t dataspace_id, dataset_id, memspace_id, plist_id, acc_tpl1;
  hsize_t dims2[2];
  hsize_t chunk_dims[2];
  herr_t status;

  // Define dimensions
  dims2[0] = num_cells;
  dims2[1] = num_dims;

  // Create the dataspace
  dataspace_id = H5Screate_simple(2, dims2, NULL);
  if (dataspace_id < 0) {
    fprintf(stderr, "Error creating dataspace\n");
    return;
  }

  // Create the dataset creation property list and set the chunking properties
  plist_id = H5Pcreate(H5P_DATASET_CREATE);
  if (plist_id < 0) {
    fprintf(stderr, "Error creating dataset creation property list\n");
    H5Sclose(dataspace_id);
    return;
  }
  chunk_dims[0] = chunk_size;
  chunk_dims[1] = dims2[1];
  status = H5Pset_chunk(plist_id, 2, chunk_dims);
  if (status < 0) {
    fprintf(stderr, "Error setting chunking properties\n");
    H5Sclose(dataspace_id);
    H5Pclose(plist_id);
    return;
  }

  // Set the compression properties
  status = H5Pset_deflate(plist_id, compression_level);
  if (status < 0) {
    fprintf(stderr, "Error setting compression properties\n");
    H5Sclose(dataspace_id);
    H5Pclose(plist_id);
    return;
  }

  // Create the dataset with chunking and compression properties
  dataset_id = H5Dcreate2(file_id, dataset_name, datatype, dataspace_id, H5P_DEFAULT, plist_id, H5P_DEFAULT);
  if (dataset_id < 0) {
    fprintf(stderr, "Error creating dataset\n");
    H5Sclose(dataspace_id);
    H5Pclose(plist_id);
    return;
  }
  H5Sclose(dataspace_id);

  // Define memory space for the dataset
  count[0] = num_cells_loc;
  count[1] = dims2[1];
  memspace_id = H5Screate_simple(2, count, NULL);
  if (memspace_id < 0) {
    fprintf(stderr, "Error creating memory space\n");
    H5Dclose(dataset_id);
    H5Pclose(plist_id);
    return;
  }

  // Select hyperslab in the dataset
  dataspace_id = H5Dget_space(dataset_id);
  if (dataspace_id < 0) {
    fprintf(stderr, "Error getting dataspace\n");
    H5Dclose(dataset_id);
    H5Sclose(memspace_id);
    H5Pclose(plist_id);
    return;
  }
  status = H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
  if (status < 0) {
    fprintf(stderr, "Error selecting hyperslab\n");
    H5Dclose(dataset_id);
    H5Sclose(dataspace_id);
    H5Sclose(memspace_id);
    H5Pclose(plist_id);
    return;
  }

  // Create property list for collective dataset write
  acc_tpl1 = H5Pcreate(H5P_DATASET_XFER);
  if (acc_tpl1 < 0) {
    fprintf(stderr, "Error creating property list for collective dataset write\n");
    H5Dclose(dataset_id);
    H5Sclose(dataspace_id);
    H5Sclose(memspace_id);
    H5Pclose(plist_id);
    return;
  }
  status = H5Pset_dxpl_mpio(acc_tpl1, H5FD_MPIO_COLLECTIVE);
  if (status < 0) {
    fprintf(stderr, "Error setting collective dataset write property\n");
    H5Dclose(dataset_id);
    H5Sclose(dataspace_id);
    H5Sclose(memspace_id);
    H5Pclose(plist_id);
    H5Pclose(acc_tpl1);
    return;
  }

  // Write data to the dataset
  status = H5Dwrite(dataset_id, datatype, memspace_id, dataspace_id, acc_tpl1, topo_dset);
  if (status < 0) {
    fprintf(stderr, "Error writing data to dataset\n");
    H5Dclose(dataset_id);
    H5Sclose(dataspace_id);
    H5Sclose(memspace_id);
    H5Pclose(plist_id);
    H5Pclose(acc_tpl1);
    return;
  }

  // Close all HDF5 objects to release resources
  H5Dclose(dataset_id);
  H5Sclose(dataspace_id);
  H5Sclose(memspace_id);
  H5Pclose(plist_id);
  H5Pclose(acc_tpl1);
}
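Note that this routine is collective: every rank must call it, even when num_cells_loc is zero, since dataset creation and the H5FD_MPIO_COLLECTIVE transfer are collective operations. A hedged end-to-end sketch of writing the points array (the dataset path follows the VTKHDF layout; count and offset come from the populate_* helpers above):

double *points_dset = NULL;
populate_points_dset(&points_dset, num_points, offset_points, count, offset);
create_chunked_dataset(file_id, count, offset, "/VTKHDF/Points",
                       num_points_glob, num_points, 3, points_dset, H5T_NATIVE_DOUBLE);
free(points_dset); // the caller owns the buffer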