I have some code that iteratively receives data and dumps it to an HDF5 file. Here is a toy example of what I am trying to achieve:
#include <HDFql.hpp>

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>
void createHDF(const std::string & filepath)
{
char script_[1024];
sprintf(script_, "CREATE TRUNCATE FILE %s", filepath.c_str());
HDFql::execute(script_);
sprintf(script_, "USE FILE %s", filepath.c_str());
HDFql::execute(script_);
sprintf(script_, "CREATE GROUP events");
HDFql::execute(script_);
HDFql::execute("CREATE CHUNKED DATASET events/xs AS SMALLINT(UNLIMITED)");
HDFql::execute("CREATE CHUNKED DATASET events/ys AS SMALLINT(UNLIMITED)");
HDFql::execute("CREATE CHUNKED DATASET events/ts AS DOUBLE(UNLIMITED)");
HDFql::execute("CREATE CHUNKED DATASET events/ps AS TINYINT(UNLIMITED)");
sprintf(script_, "CREATE GROUP frames");
HDFql::execute(script_);
sprintf(script_, "CREATE GROUP optic_flow");
HDFql::execute(script_);
}
void writeData(const std::vector<double>& ts_v, std::vector<int16_t>& xs_v,
std::vector<int16_t>& ys_v, std::vector<int8_t>& ps_v)
{
//Input arrays are all the same size
const int data_size = ts_v.size();
//Open file
sprintf(script_, "USE FILE %s", HDF5_path_.c_str());
HDFql::execute(script_);
//Add events
sprintf(script_, "ALTER DIMENSION events/xs TO +%d", data_size);
HDFql::execute(script_);
sprintf(script_, "ALTER DIMENSION events/ys TO +%d", data_size);
HDFql::execute(script_);
sprintf(script_, "ALTER DIMENSION events/ts TO +%d", data_size);
HDFql::execute(script_);
sprintf(script_, "ALTER DIMENSION events/ps TO +%d", data_size);
HDFql::execute(script_);
HDFql::variableRegister(&xs_v[0]);
sprintf(script_, "INSERT INTO events/xs(-%d:1:1:%d) VALUES FROM MEMORY %d", data_size,
data_size, HDFql::variableGetNumber(&xs_v[0]));
HDFql::execute(script_);
HDFql::variableUnregister(&xs_v[0]);
HDFql::variableRegister(&ys_v[0]);
sprintf(script_, "INSERT INTO events/ys(-%d:1:1:%d) VALUES FROM MEMORY %d", data_size,
data_size, HDFql::variableGetNumber(&ys_v[0]));
HDFql::execute(script_);
HDFql::variableUnregister(&ys_v[0]);
HDFql::variableRegister(&ts_v[0]);
sprintf(script_, "INSERT INTO events/ts(-%d:1:1:%d) VALUES FROM MEMORY %d", data_size,
data_size, HDFql::variableGetNumber(&ts_v[0]));
HDFql::execute(script_);
HDFql::variableUnregister(&ts_v[0]);
HDFql::variableRegister(&ps_v[0]);
sprintf(script_, "INSERT INTO events/ps(-%d:1:1:%d) VALUES FROM MEMORY %d", data_size,
data_size, HDFql::variableGetNumber(&ps_v[0]));
HDFql::execute(script_);
HDFql::variableUnregister(&ps_v[0]);
total_events_added_ += data_size;
events_idx_++;
}
int main (int argc, const char * argv[]) {
std::string path = "/tmp/test.h5";
createHDF(path);
const int data_size = 1000;
const int iterations = 10000;
std::vector<double> ts(data_size);
std::vector<int16_t> xs(data_size);
std::vector<int16_t> ys(data_size);
std::vector<int8_t> ps(data_size);
for(int i=0; i<data_size; i++)
{
ts_v.push_back(i);
xs_v.push_back(i);
ys_v.push_back(i);
ps_v.push_back(1);
}
for(int i=0; i<iterations; i++)
{
writeData(ts, xs, ys, ps);
}
}
This code runs extremely slowly. Using other binary libraries such as cnpy, the same amount of data is written in the blink of an eye, so the volume of data is not the issue. I was wondering whether this is simply how HDFql performs, or whether there is a blunder somewhere in my code.
Many thanks!