diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp
index 71628bc671..d7089f4651 100644
--- a/examples/3_write_serial.cpp
+++ b/examples/3_write_serial.cpp
@@ -49,7 +49,7 @@ int main(int argc, char *argv[])
     cout << "Created a scalar mesh Record with all required openPMD "
             "attributes\n";
 
-    Datatype datatype = determineDatatype(shareRaw(global_data));
+    Datatype datatype = determineDatatypeContiguous(global_data);
     Extent extent = {size, size};
     Dataset dataset = Dataset(datatype, extent);
     cout << "Created a Dataset of size " << dataset.extent[0] << 'x'
@@ -63,7 +63,7 @@ int main(int argc, char *argv[])
     cout << "File structure and required attributes have been written\n";
 
     Offset offset = {0, 0};
-    rho.storeChunk(shareRaw(global_data), offset, extent);
+    rho.storeChunk(global_data, offset, extent);
     cout << "Stored the whole Dataset contents as a single chunk, "
             "ready to write content\n";
diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp
index d15dba92c6..10609dbffe 100644
--- a/examples/3b_write_resizable_particles.cpp
+++ b/examples/3b_write_resizable_particles.cpp
@@ -39,7 +39,7 @@ int main()
     std::vector<double> y{-2., -3., -4., -5., -6.};
 
     // both x and y the same type, otherwise we use two distinct datasets
-    Datatype dtype = determineDatatype(shareRaw(x));
+    Datatype dtype = determineDatatypeContiguous(x);
     Extent size = {x.size()};
     auto dataset = Dataset(dtype, size, "{ \"resizable\": true }");
diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp
index 39be86ab43..156227c4fd 100644
--- a/include/openPMD/Datatype.hpp
+++ b/include/openPMD/Datatype.hpp
@@ -20,6 +20,8 @@
  */
 #pragma once
 
+#include "openPMD/auxiliary/TypeTraits.hpp"
+
 #include <array>
 #include <climits>
 #include <complex>
@@ -277,161 +279,30 @@ inline constexpr Datatype determineDatatype()
 template <typename T>
 inline constexpr Datatype determineDatatype(std::shared_ptr<T>)
 {
-    using DT = Datatype;
-    if (decay_equiv<T, char>::value)
-    {
-        return DT::CHAR;
-    }
-    else if (decay_equiv<T, unsigned char>::value)
-    {
-        return DT::UCHAR;
-    }
-    else if (decay_equiv<T, signed char>::value)
-    {
-        return DT::SCHAR;
-    }
-    else if (decay_equiv<T, short>::value)
-    {
-        return DT::SHORT;
-    }
-    else if (decay_equiv<T, int>::value)
-    {
-        return DT::INT;
-    }
-    else if (decay_equiv<T, long>::value)
-    {
-        return DT::LONG;
-    }
-    else if (decay_equiv<T, long long>::value)
-    {
-        return DT::LONGLONG;
-    }
-    else if (decay_equiv<T, unsigned short>::value)
-    {
-        return DT::USHORT;
-    }
-    else if (decay_equiv<T, unsigned int>::value)
-    {
-        return DT::UINT;
-    }
-    else if (decay_equiv<T, unsigned long>::value)
-    {
-        return DT::ULONG;
-    }
-    else if (decay_equiv<T, unsigned long long>::value)
-    {
-        return DT::ULONGLONG;
-    }
-    else if (decay_equiv<T, float>::value)
-    {
-        return DT::FLOAT;
-    }
-    else if (decay_equiv<T, double>::value)
-    {
-        return DT::DOUBLE;
-    }
-    else if (decay_equiv<T, long double>::value)
-    {
-        return DT::LONG_DOUBLE;
-    }
-    else if (decay_equiv<T, std::complex<float>>::value)
-    {
-        return DT::CFLOAT;
-    }
-    else if (decay_equiv<T, std::complex<double>>::value)
-    {
-        return DT::CDOUBLE;
-    }
-    else if (decay_equiv<T, std::complex<long double>>::value)
-    {
-        return DT::CLONG_DOUBLE;
-    }
-    else if (decay_equiv<T, std::string>::value)
-    {
-        return DT::STRING;
-    }
-    else if (decay_equiv<T, std::vector<char>>::value)
-    {
-        return DT::VEC_CHAR;
-    }
-    else if (decay_equiv<T, std::vector<short>>::value)
-    {
-        return DT::VEC_SHORT;
-    }
-    else if (decay_equiv<T, std::vector<int>>::value)
-    {
-        return DT::VEC_INT;
-    }
-    else if (decay_equiv<T, std::vector<long>>::value)
-    {
-        return DT::VEC_LONG;
-    }
-    else if (decay_equiv<T, std::vector<long long>>::value)
-    {
-        return DT::VEC_LONGLONG;
-    }
-    else if (decay_equiv<T, std::vector<unsigned char>>::value)
-    {
-        return DT::VEC_UCHAR;
-    }
-    else if (decay_equiv<T, std::vector<signed char>>::value)
-    {
-        return DT::VEC_SCHAR;
-    }
-    else if (decay_equiv<T, std::vector<unsigned short>>::value)
-    {
-        return DT::VEC_USHORT;
-    }
-    else if (decay_equiv<T, std::vector<unsigned int>>::value)
-    {
-        return DT::VEC_UINT;
-    }
-    else if (decay_equiv<T, std::vector<unsigned long>>::value)
-    {
-        return DT::VEC_ULONG;
-    }
-    else if (decay_equiv<T, std::vector<unsigned long long>>::value)
-    {
-        return DT::VEC_ULONGLONG;
-    }
-    else if (decay_equiv<T, std::vector<float>>::value)
-    {
-        return DT::VEC_FLOAT;
-    }
-    else if (decay_equiv<T, std::vector<double>>::value)
-    {
-        return DT::VEC_DOUBLE;
-    }
-    else if (decay_equiv<T, std::vector<long double>>::value)
-    {
-        return DT::VEC_LONG_DOUBLE;
-    }
-    else if (decay_equiv<T, std::vector<std::complex<float>>>::value)
-    {
-        return DT::VEC_CFLOAT;
-    }
-    else if (decay_equiv<T, std::vector<std::complex<double>>>::value)
-    {
-        return DT::VEC_CDOUBLE;
-    }
-    else if (decay_equiv<T, std::vector<std::complex<long double>>>::value)
-    {
-        return DT::VEC_CLONG_DOUBLE;
-    }
-    else if (decay_equiv<T, std::vector<std::string>>::value)
-    {
-        return DT::VEC_STRING;
-    }
-    else if (decay_equiv<T, std::array<double, 7>>::value)
+    return determineDatatype<T>();
+}
+
+template <typename T>
+inline constexpr Datatype determineDatatypeRaw(T const *)
+{
+    return determineDatatype<T>();
+}
+
+template <typename T_ContiguousContainer>
+inline constexpr Datatype determineDatatypeContiguous(T_ContiguousContainer &&)
+{
+    using T_ContiguousContainer_stripped =
+        std::remove_reference_t<T_ContiguousContainer>;
+    if constexpr (auxiliary::IsContiguousContainer_v<
+                      T_ContiguousContainer_stripped>)
     {
-        return DT::ARR_DBL_7;
+        return determineDatatype<
+            typename T_ContiguousContainer_stripped::value_type>();
     }
-    else if (decay_equiv<T, bool>::value)
+    else
     {
-        return DT::BOOL;
+        return Datatype::UNDEFINED;
     }
-    else
-        return DT::UNDEFINED;
 }
 
 /** Return number of bytes representing a Datatype
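Usage note (not part of the patch): after this change, the datatype helpers split by how the caller holds the data. A minimal sketch of all three entry points, assuming an otherwise set-up openPMD program:

```cpp
#include <openPMD/openPMD.hpp>

#include <vector>

void datatype_examples()
{
    std::vector<double> buffer(100, 0.0);
    double raw[100] = {};

    // contiguous containers (std::vector, std::array)
    openPMD::Datatype dtVec = openPMD::determineDatatypeContiguous(buffer);
    // raw pointers
    openPMD::Datatype dtRaw = openPMD::determineDatatypeRaw(raw);
    // plain template parameter, as before
    openPMD::Datatype dtT = openPMD::determineDatatype<double>();

    // all three yield Datatype::DOUBLE here
    (void)dtVec;
    (void)dtRaw;
    (void)dtT;
}
```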
diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp
index f8d08ab821..d4db58483b 100644
--- a/include/openPMD/IO/IOTask.hpp
+++ b/include/openPMD/IO/IOTask.hpp
@@ -67,7 +67,7 @@ namespace internal
      * The returned strings are compile-time constants, so no worries about
      * pointer validity.
      */
-    std::string operationAsString(Operation);
+    OPENPMDAPI_EXPORT std::string operationAsString(Operation);
 } // namespace internal
 
 struct OPENPMDAPI_EXPORT AbstractParameter
diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp
index d30f7684f2..d985ddd21d 100644
--- a/include/openPMD/RecordComponent.hpp
+++ b/include/openPMD/RecordComponent.hpp
@@ -21,7 +21,7 @@
 #pragma once
 
 #include "openPMD/Dataset.hpp"
-#include "openPMD/auxiliary/ShareRaw.hpp"
+#include "openPMD/auxiliary/TypeTraits.hpp"
 #include "openPMD/backend/BaseRecordComponent.hpp"
 
 #include <array>
@@ -42,34 +42,6 @@
 namespace openPMD
 {
-namespace traits
-{
-    /** Emulate in the C++17 concept ContiguousContainer
-     *
-     * Users can implement this trait for a type to signal it can be used as
-     * contiguous container.
-     *
-     * See:
-     * https://en.cppreference.com/w/cpp/named_req/ContiguousContainer
-     */
-    template <typename T>
-    struct IsContiguousContainer
-    {
-        static constexpr bool value = false;
-    };
-
-    template <typename T_Value>
-    struct IsContiguousContainer<std::vector<T_Value>>
-    {
-        static constexpr bool value = true;
-    };
-
-    template <typename T_Value, std::size_t N>
-    struct IsContiguousContainer<std::array<T_Value, N>>
-    {
-        static constexpr bool value = true;
-    };
-} // namespace traits
 
 template <typename T>
 class DynamicMemoryView;
@@ -225,26 +197,118 @@ class RecordComponent : public BaseRecordComponent
     template <typename T>
     std::shared_ptr<T> loadChunk(Offset = {0u}, Extent = {-1u});
 
-    /** Load a chunk of data into pre-allocated memory
+    /** Load a chunk of data into pre-allocated memory.
+     *
+     * @param data Preallocated, contiguous buffer, large enough to load
+     *             the requested data into it.
+     *             The shared pointer must either own and manage the buffer
+     *             or have been created via shareRaw().
+     *             If using shareRaw(), it is the caller's responsibility
+     *             to ensure that the lifetime of the buffer exceeds the
+     *             next flush point.
+     *             Optimizations might be implemented based on this
+     *             assumption (e.g. skipping the operation if the backend
+     *             is the unique owner).
+     *             For raw pointers, use loadChunkRaw().
+     * @param offset Offset within the dataset. Set to {0u} for full selection.
+     * @param extent Extent within the dataset, counted from the offset.
+     *               Set to {-1u} for full selection.
+     *               If offset is non-zero and extent is {-1u}, the leftover
+     *               extent in the record component will be selected.
+     */
+    template <typename T>
+    void loadChunk(std::shared_ptr<T> data, Offset offset, Extent extent);
+
+    /** Load a chunk of data into pre-allocated memory, raw pointer version.
      *
-     * shared_ptr for data must be pre-allocated, contiguous and large enough
-     * for extent
+     * @param data Preallocated, contiguous buffer, large enough to load
+     *             the requested data into it.
+     *             It is the caller's responsibility to ensure that the
+     *             lifetime of the buffer exceeds the next flush point.
+     * @param offset Offset within the dataset. Set to {0u} for full selection.
+     * @param extent Extent within the dataset, counted from the offset.
+     *               Set to {-1u} for full selection.
+     *               If offset is non-zero and extent is {-1u}, the leftover
+     *               extent in the record component will be selected.
+     */
+    template <typename T>
+    void loadChunkRaw(T *data, Offset offset, Extent extent);
+
+    /** Store a chunk of data from a chunk of memory.
      *
-     * Set offset to {0u} and extent to {-1u} for full selection.
+     * @param data Preallocated, contiguous buffer, large enough to read
+     *             the specified data from it.
+     *             The shared pointer must either own and manage the buffer
+     *             or have been created via shareRaw().
+     *             If using shareRaw(), it is the caller's responsibility
+     *             to ensure that the lifetime of the buffer exceeds the
+     *             next flush point.
+     *             Optimizations might be implemented based on this
+     *             assumption (e.g. further deferring the operation if the
+     *             backend is the unique owner).
+     *             For raw pointers, use storeChunkRaw().
+     * @param offset Offset within the dataset.
+     * @param extent Extent within the dataset, counted from the offset.
+     */
+    template <typename T>
+    void storeChunk(std::shared_ptr<T> data, Offset offset, Extent extent);
+
+    /** Store a chunk of data from a chunk of memory, array version.
      *
-     * If offset is non-zero and extent is {-1u} the leftover extent in the
-     * record component will be selected.
+     * @param data Preallocated, contiguous buffer, large enough to read
+     *             the specified data from it.
+     *             The array-based overload helps avoid having to manually
+     *             specify the delete[] destructor (C++17 feature).
+     * @param offset Offset within the dataset.
+     * @param extent Extent within the dataset, counted from the offset.
      */
     template <typename T>
-    void loadChunk(std::shared_ptr<T>, Offset, Extent);
+    void storeChunk(std::shared_ptr<T[]>, Offset, Extent);
 
+    /** Store a chunk of data from a chunk of memory, raw pointer version.
+     *
+     * @param data Preallocated, contiguous buffer, large enough to read
+     *             the specified data from it.
+     *             It is the caller's responsibility to ensure that the
+     *             lifetime of the buffer exceeds the next flush point.
+     * @param offset Offset within the dataset.
+     * @param extent Extent within the dataset, counted from the offset.
+     */
     template <typename T>
-    void storeChunk(std::shared_ptr<T>, Offset, Extent);
+    void storeChunkRaw(T *data, Offset offset, Extent extent);
 
+    /** Store a chunk of data from a contiguous container.
+     *
+     * @param data Contiguous container, large enough to read the specified
+     *             data from it. A contiguous container here is either a
+     *             std::vector or a std::array.
+     *             It is the caller's responsibility to ensure that the
+     *             lifetime of the container exceeds the next flush point.
+     * @param offset Offset within the dataset.
+     * @param extent Extent within the dataset, counted from the offset.
+     */
     template <typename T_ContiguousContainer>
     typename std::enable_if<
-        traits::IsContiguousContainer<T_ContiguousContainer>::value>::type
-    storeChunk(T_ContiguousContainer &, Offset = {0u}, Extent = {-1u});
+        auxiliary::IsContiguousContainer_v<T_ContiguousContainer>>::type
+    storeChunk(
+        T_ContiguousContainer &data,
+        Offset offset = {0u},
+        Extent extent = {-1u});
 
     /**
      * @brief Overload of storeChunk() that lets the openPMD API allocate
@@ -254,14 +318,17 @@ class RecordComponent : public BaseRecordComponent
      * users a view into its own buffers, avoiding the need to allocate
      * a new buffer.
      *
-     * Data can be written into the returned buffer until the next call to
-     * Series::flush() at which time the data will be read from.
+     * Data can be written into the returned buffer until the next
+     * flush point, at which time the data will be read from.
      *
      * In order to provide a view into backend buffers, this call must possibly
      * create files and datasets in the backend, making it MPI-collective.
      * In order to avoid this, calling Series::flush() prior to this is
     * recommended to flush definitions.
      *
+     * @param offset Offset within the dataset.
+     * @param extent Extent within the dataset, counted from the offset.
      * @param createBuffer If the backend in use has no special support for this
      *     operation, the openPMD API will fall back to creating a buffer,
      *     queuing it for writing and returning a view into that buffer to
@@ -277,7 +344,8 @@ class RecordComponent : public BaseRecordComponent
      * @return View into a buffer that can be filled with data.
      */
     template <typename T, typename F>
-    DynamicMemoryView<T> storeChunk(Offset, Extent, F &&createBuffer);
+    DynamicMemoryView<T>
+    storeChunk(Offset offset, Extent extent, F &&createBuffer);
 
     /**
      * Overload of span-based storeChunk() that uses operator new() to create
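To make the reworked RecordComponent surface above concrete, here is an illustrative sketch (not part of the patch) of how the store and load variants line up; `rc` is assumed to be a record component whose dataset was reset to a 1-D extent of ten doubles:

```cpp
#include <openPMD/openPMD.hpp>

#include <memory>
#include <vector>

void store_load_variants(openPMD::RecordComponent &rc, openPMD::Series &series)
{
    std::vector<double> vec(10, 1.0);

    // contiguous-container overload: datatype deduced from value_type
    rc.storeChunk(vec, {0}, {10});

    // raw-pointer version: caller guarantees the buffer outlives the flush
    rc.storeChunkRaw(vec.data(), {0}, {10});

    // owning shared_ptr, array form: no need to spell out delete[]
    std::shared_ptr<double[]> owned{new double[10]};
    rc.storeChunk(owned, {0}, {10});

    // loading back into pre-allocated memory via the raw-pointer version
    std::vector<double> out(10);
    rc.loadChunkRaw(out.data(), {0}, {10});
    series.flush(); // `out` holds valid data only after this flush point
}
```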
diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp
index 7e757fd7fd..d8b3239a25 100644
--- a/include/openPMD/RecordComponent.tpp
+++ b/include/openPMD/RecordComponent.tpp
@@ -23,6 +23,8 @@
 
 #include "openPMD/RecordComponent.hpp"
 #include "openPMD/Span.hpp"
+#include "openPMD/auxiliary/ShareRawInternal.hpp"
+#include "openPMD/auxiliary/TypeTraits.hpp"
 
 namespace openPMD
 {
@@ -157,6 +159,15 @@ inline void RecordComponent::loadChunk(
     }
 }
 
+template <typename T>
+inline void RecordComponent::loadChunkRaw(T *ptr, Offset offset, Extent extent)
+{
+    loadChunk<T>(
+        auxiliary::shareRaw(ptr),
+        std::move(offset),
+        std::move(extent));
+}
+
 template< typename T >
 inline void
 RecordComponent::storeChunk(std::shared_ptr<T> data, Offset o, Extent e)
@@ -208,11 +219,29 @@ RecordComponent::storeChunk(std::shared_ptr<T> data, Offset o, Extent e)
     rc.m_chunks.push(IOTask(this, dWrite));
 }
 
+template <typename T>
+inline void
+RecordComponent::storeChunk(std::shared_ptr<T[]> data, Offset o, Extent e)
+{
+    storeChunk(
+        std::static_pointer_cast<T>(std::move(data)),
+        std::move(o),
+        std::move(e));
+}
+
+template <typename T>
+void RecordComponent::storeChunkRaw(T *ptr, Offset offset, Extent extent)
+{
+    storeChunk(
+        auxiliary::shareRaw(ptr),
+        std::move(offset),
+        std::move(extent));
+}
+
 template< typename T_ContiguousContainer >
 inline typename std::enable_if<
-    traits::IsContiguousContainer< T_ContiguousContainer >::value
->::type
-RecordComponent::storeChunk(T_ContiguousContainer & data, Offset o, Extent e)
+    auxiliary::IsContiguousContainer_v<T_ContiguousContainer> >::type
+RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e)
 {
     uint8_t dim = getDimensionality();
@@ -231,7 +260,10 @@ RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e)
     else
         extent = e;
 
-    storeChunk(shareRaw(data), offset, extent);
+    storeChunk(
+        auxiliary::shareRaw(data.data()),
+        offset,
+        extent);
 }
 
 template< typename T, typename F >
@@ -310,6 +342,8 @@ RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer )
     auto &out = *getBufferView.out;
     if (!out.backendManagedBuffer)
     {
+        // note that data might have either
+        // type shared_ptr<T> or shared_ptr<T[]>
         auto data = std::forward<F>(createBuffer)(size);
         out.ptr = static_cast<void *>(data.get());
         storeChunk(std::move(data), std::move(o), std::move(e));
@@ -326,8 +360,12 @@ RecordComponent::storeChunk( Offset offset, Extent extent )
         std::move( extent ),
         []( size_t size )
         {
+#if defined(__clang_major__) && __clang_major__ < 7
             return std::shared_ptr< T >{
                 new T[ size ], []( auto * ptr ) { delete[] ptr; } };
+#else
+            return std::shared_ptr< T[] >{ new T[ size ] };
+#endif
         } );
 }
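The span-based overload touched above can be exercised as follows. This is an illustrative sketch, not part of the patch; `currentBuffer()` is assumed to be the DynamicMemoryView accessor, as used in the library's span examples. The `#if` guard in the patch keeps the hand-written `delete[]` deleter for old clang versions, presumably because `std::shared_ptr<T[]>` support was incomplete there.

```cpp
#include <openPMD/openPMD.hpp>

#include <cstddef>

void span_write(openPMD::RecordComponent &rc)
{
    // the API allocates (or exposes) the buffer; no user-side copy needed
    auto view = rc.storeChunk<double>({0}, {10});
    auto span = view.currentBuffer();
    for (std::size_t i = 0; i < span.size(); ++i)
        span[i] = static_cast<double>(i);
    // the buffer contents are consumed at the next flush point
}
```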
diff --git a/include/openPMD/auxiliary/ShareRaw.hpp b/include/openPMD/auxiliary/ShareRaw.hpp
index e3d2d1efb7..550560e43a 100644
--- a/include/openPMD/auxiliary/ShareRaw.hpp
+++ b/include/openPMD/auxiliary/ShareRaw.hpp
@@ -42,18 +42,29 @@ namespace openPMD
  * reference counting.
  */
 template <typename T>
-std::shared_ptr<T> shareRaw(T *x)
+[[deprecated(
+    "For storing/loading data via raw pointers use "
+    "storeChunkRaw<>()/loadChunkRaw<>()")]] //
+std::shared_ptr<T>
+shareRaw(T *x)
 {
     return std::shared_ptr<T>(x, [](T *) {});
 }
 
 template <typename T>
-std::shared_ptr<T const> shareRaw(T const *x)
+[[deprecated(
+    "For storing/loading data via raw pointers use "
+    "storeChunkRaw<>()/loadChunkRaw<>()")]] //
+std::shared_ptr<T const>
+shareRaw(T const *x)
 {
     return std::shared_ptr<T const>(x, [](T const *) {});
 }
 
 template <typename T>
+[[deprecated(
+    "For storing/loading data via raw pointers use "
+    "storeChunkRaw<>()/loadChunkRaw<>()")]] //
 auto shareRaw(T &c)
     -> std::shared_ptr<typename std::remove_pointer<decltype(c.data())>::type>
 {
@@ -62,6 +73,9 @@ auto shareRaw(T &c)
 }
 
 template <typename T>
+[[deprecated(
+    "For storing/loading data via raw pointers use "
+    "storeChunkRaw<>()/loadChunkRaw<>()")]] //
 auto shareRaw(T const &c)
     -> std::shared_ptr<typename std::remove_pointer<decltype(c.data())>::type>
 {
diff --git a/include/openPMD/auxiliary/ShareRawInternal.hpp b/include/openPMD/auxiliary/ShareRawInternal.hpp
new file mode 100644
index 0000000000..b26f0b59f9
--- /dev/null
+++ b/include/openPMD/auxiliary/ShareRawInternal.hpp
@@ -0,0 +1,79 @@
+/* Copyright 2018-2021 Axel Huebl
+ *
+ * This file is part of openPMD-api.
+ *
+ * openPMD-api is free software: you can redistribute it and/or modify
+ * it under the terms of either the GNU General Public License or
+ * the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * openPMD-api is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with openPMD-api.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+/*
+ * This header copies ShareRaw.hpp, but:
+ * 1. without deprecation markings
+ * 2. for internal usage only
+ * 3. inside auxiliary namespace
+ */
+
+namespace openPMD::auxiliary
+{
+//! @{
+/** Share ownership with a raw pointer
+ *
+ * Helper function to share load/store data ownership
+ * unprotected and without reference counting with a
+ * raw pointer or stdlib container (that implements a
+ * contiguous data storage).
+ *
+ * @warning this is a helper function to bypass the shared-pointer
+ *          API for storing data behind raw pointers. Using it puts
+ *          the responsibility of buffer consistency between stores
+ *          and flushes on the user's side without an indication via
+ *          reference counting.
+ */
+template <typename T>
+std::shared_ptr<T> shareRaw(T *x)
+{
+    return std::shared_ptr<T>(x, [](T *) {});
+}
+
+template <typename T>
+std::shared_ptr<T const> shareRaw(T const *x)
+{
+    return std::shared_ptr<T const>(x, [](T const *) {});
+}
+
+template <typename T>
+auto shareRaw(T &c)
+    -> std::shared_ptr<typename std::remove_pointer<decltype(c.data())>::type>
+{
+    using value_type = typename std::remove_pointer<decltype(c.data())>::type;
+    return std::shared_ptr<value_type>(c.data(), [](value_type *) {});
+}
+
+template <typename T>
+auto shareRaw(T const &c)
+    -> std::shared_ptr<typename std::remove_pointer<decltype(c.data())>::type>
+{
+    using value_type = typename std::remove_pointer<decltype(c.data())>::type;
+    return std::shared_ptr<value_type>(c.data(), [](value_type *) {});
+}
+//! @}
+} // namespace openPMD::auxiliary
diff --git a/include/openPMD/auxiliary/TypeTraits.hpp b/include/openPMD/auxiliary/TypeTraits.hpp
index c3ed8a1cd8..8e3aa03118 100644
--- a/include/openPMD/auxiliary/TypeTraits.hpp
+++ b/include/openPMD/auxiliary/TypeTraits.hpp
@@ -59,4 +59,15 @@ inline constexpr bool IsVector_v = detail::IsVector<T>::value;
 
 template <typename T>
 inline constexpr bool IsArray_v = detail::IsArray<T>::value;
+
+/** Emulate the C++17 concept ContiguousContainer
+ *
+ * Users can implement this trait for a type to signal it can be used as
+ * a contiguous container.
+ *
+ * See:
+ * https://en.cppreference.com/w/cpp/named_req/ContiguousContainer
+ */
+template <typename T>
+inline constexpr bool IsContiguousContainer_v = IsVector_v<T> || IsArray_v<T>;
 } // namespace openPMD::auxiliary
diff --git a/include/openPMD/backend/PatchRecordComponent.hpp b/include/openPMD/backend/PatchRecordComponent.hpp
index 280b674ceb..4613ab8f2c 100644
--- a/include/openPMD/backend/PatchRecordComponent.hpp
+++ b/include/openPMD/backend/PatchRecordComponent.hpp
@@ -20,6 +20,7 @@
  */
 #pragma once
 
+#include "openPMD/auxiliary/ShareRawInternal.hpp"
 #include "openPMD/backend/BaseRecordComponent.hpp"
 
 #include <memory>
@@ -84,6 +85,9 @@ class PatchRecordComponent : public BaseRecordComponent
     template <typename T>
     void load(std::shared_ptr<T>);
 
+    template <typename T>
+    void loadRaw(T *);
+
     template <typename T>
     void store(uint64_t idx, T);
 
@@ -169,6 +173,12 @@ inline void PatchRecordComponent::load(std::shared_ptr<T> data)
     rc.m_chunks.push(IOTask(this, dRead));
 }
 
+template <typename T>
+inline void PatchRecordComponent::loadRaw(T *data)
+{
+    load<T>(auxiliary::shareRaw(data));
+}
+
 template <typename T>
 inline void PatchRecordComponent::store(uint64_t idx, T data)
 {
diff --git a/src/binding/python/PatchRecordComponent.cpp b/src/binding/python/PatchRecordComponent.cpp
index a734e1d6b9..f2fe0aa753 100644
--- a/src/binding/python/PatchRecordComponent.cpp
+++ b/src/binding/python/PatchRecordComponent.cpp
@@ -22,7 +22,6 @@
 #include <pybind11/pybind11.h>
 
 #include "openPMD/DatatypeHelpers.hpp"
-#include "openPMD/auxiliary/ShareRaw.hpp"
 #include "openPMD/backend/BaseRecordComponent.hpp"
 #include "openPMD/backend/PatchRecordComponent.hpp"
 #include "openPMD/binding/python/Numpy.hpp"
@@ -37,7 +36,7 @@ struct Prc_Load
     template <typename T>
     static void call(PatchRecordComponent &prc, py::array &a)
     {
-        prc.load(shareRaw((T *)a.mutable_data()));
+        prc.loadRaw((T *)a.mutable_data());
     }
 
     static constexpr char const *errorMsg = "Datatype not known in 'load'!";
diff --git a/src/binding/python/RecordComponent.cpp b/src/binding/python/RecordComponent.cpp
index c2c1acfebb..1b6c4ebee5 100644
--- a/src/binding/python/RecordComponent.cpp
+++ b/src/binding/python/RecordComponent.cpp
@@ -25,7 +25,6 @@
 #include "openPMD/DatatypeHelpers.hpp"
 #include "openPMD/RecordComponent.hpp"
 #include "openPMD/Series.hpp"
-#include "openPMD/auxiliary/ShareRaw.hpp"
 #include "openPMD/backend/BaseRecordComponent.hpp"
 #include "openPMD/binding/python/Numpy.hpp"
 #include "openPMD/binding/python/Pickle.hpp"
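For downstream code, the deprecation boils down to the following migration (sketch, not part of the patch):

```cpp
#include <openPMD/openPMD.hpp>

#include <vector>

void migrate(openPMD::RecordComponent &rc, std::vector<double> &data)
{
    // before (still compiles, but now emits a deprecation warning):
    // rc.storeChunk(openPMD::shareRaw(data.data()), {0}, {data.size()});

    // after: raw pointers go through the explicit *Raw entry points
    rc.storeChunkRaw(data.data(), {0}, {data.size()});

    // containers can simply be passed directly
    rc.storeChunk(data, {0}, {data.size()});
}
```

Internally, the library keeps the non-deprecated copy in ShareRawInternal.hpp so that its own call sites stay warning-free.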
diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp
index fa2a1cad42..42045789a5 100644
--- a/test/CoreTest.cpp
+++ b/test/CoreTest.cpp
@@ -836,7 +836,7 @@ TEST_CASE("wrapper_test", "[core]")
     MeshRecordComponent mrc2 = o.iterations[4].meshes["E"]["y"];
     REQUIRE(mrc2.constant());
     double loadData;
-    mrc2.loadChunk(shareRaw(&loadData), {0}, {1});
+    mrc2.loadChunkRaw(&loadData, {0}, {1});
     o.flush();
     REQUIRE(loadData == value);
     // TODO: do we want to be able to make data constant after already writing
@@ -846,7 +846,7 @@ TEST_CASE("wrapper_test", "[core]")
         Catch::Equals("A recordComponent can not (yet) be made constant after "
                       "it has been written."));
     std::array<double, 1> moreData = {{112233.}};
-    o.iterations[4].meshes["E"]["y"].loadChunk(shareRaw(moreData), {0}, {1});
+    o.iterations[4].meshes["E"]["y"].loadChunkRaw(moreData.data(), {0}, {1});
     o.flush();
     REQUIRE(moreData[0] == value);
     auto all_data = o.iterations[4].meshes["E"]["y"].loadChunk<double>();
@@ -862,8 +862,7 @@ TEST_CASE("wrapper_test", "[core]")
         Dataset(Datatype::DOUBLE, {1}));
     int wrongData = 42;
     REQUIRE_THROWS_WITH(
-        o.iterations[5].meshes["E"]["y"].storeChunk(
-            shareRaw(&wrongData), {0}, {1}),
+        o.iterations[5].meshes["E"]["y"].storeChunkRaw(&wrongData, {0}, {1}),
         Catch::Equals("Datatypes of chunk data (INT) and record component "
                       "(DOUBLE) do not match."));
     std::shared_ptr<double> storeData = std::make_shared<double>(44);
diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp
index 1ca922c022..3dd0929450 100644
--- a/test/ParallelIOTest.cpp
+++ b/test/ParallelIOTest.cpp
@@ -846,7 +846,7 @@ void file_based_write_read(std::string file_ending)
             Offset chunk_offset = {0, local_Nz * mpi_rank};
             Extent chunk_extent = {global_Nx, local_Nz};
 
-            E_x.storeChunk(io::shareRaw(E_x_data), chunk_offset, chunk_extent);
+            E_x.storeChunk(E_x_data, chunk_offset, chunk_extent);
             series.flush();
         }
     }
diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp
index eac05b0214..e2a85e2088 100644
--- a/test/SerialIOTest.cpp
+++ b/test/SerialIOTest.cpp
@@ -1812,8 +1812,7 @@ inline void fileBased_write_test(const std::string &backend)
     for (uint64_t i = 0; i < 4; ++i)
     {
         double const position_local_2 = position_global.at(i);
-        e_2["position"]["x"].storeChunk(
-            shareRaw(&position_local_2), {i}, {1});
+        e_2["position"]["x"].storeChunkRaw(&position_local_2, {i}, {1});
         *positionOffset_local_2 = positionOffset_global[i];
         e_2["positionOffset"]["x"].storeChunk(
             positionOffset_local_2, {i}, {1});
@@ -3570,7 +3569,7 @@ TEST_CASE("hzdr_hdf5_sample_content_test", "[serial][hdf5]")
         isSame(e_extent_z.getDatatype(), determineDatatype<uint64_t>()));
 
     std::vector<uint64_t> data(e_patches.size());
-    e_extent_z.load(shareRaw(data.data()));
+    e_extent_z.loadRaw(data.data());
     species_e.seriesFlush();
     REQUIRE(data.at(0) == static_cast<uint64_t>(80));
    REQUIRE(data.at(1) == static_cast<uint64_t>(80));
@@ -3592,7 +3591,7 @@ TEST_CASE("hzdr_hdf5_sample_content_test", "[serial][hdf5]")
     REQUIRE(isSame(
         e_numParticles_scalar.getDatatype(),
         determineDatatype<uint64_t>()));
 
-    e_numParticles_scalar.load(shareRaw(data.data()));
+    e_numParticles_scalar.loadRaw(data.data());
     o.flush();
     REQUIRE(data.at(0) == static_cast<uint64_t>(512000));
     REQUIRE(data.at(1) == static_cast<uint64_t>(819200));
@@ -3638,7 +3637,7 @@ TEST_CASE("hzdr_hdf5_sample_content_test", "[serial][hdf5]")
     REQUIRE(
         isSame(e_offset_y.getDatatype(), determineDatatype<uint64_t>()));
 
-    e_offset_y.load(shareRaw(data.data()));
+    e_offset_y.loadRaw(data.data());
     o.flush();
     REQUIRE(data.at(0) == static_cast<uint64_t>(0));
     REQUIRE(data.at(1) == static_cast<uint64_t>(128));
@@ -6601,8 +6600,8 @@ void groupbased_read_write(std::string const &ext)
     auto E_y = write.iterations[0].meshes["E"]["y"];
     E_x.resetDataset(ds);
     E_y.resetDataset(ds);
-    E_x.storeChunk(shareRaw(&data), {0}, {1});
-    E_y.storeChunk(shareRaw(&data), {0}, {1});
+    E_x.storeChunkRaw(&data, {0}, {1});
+    E_y.storeChunkRaw(&data, {0}, {1});
     E_x.setAttribute("updated_in_run", 0);
     E_y.setAttribute("updated_in_run", 0);
@@ -6619,8 +6618,8 @@ void groupbased_read_write(std::string const &ext)
 
     data = 1;
 
-    E_x.storeChunk(shareRaw(&data), {0}, {1});
-    E_y.storeChunk(shareRaw(&data), {0}, {1});
+    E_x.storeChunkRaw(&data, {0}, {1});
+    E_y.storeChunkRaw(&data, {0}, {1});
     E_x.setAttribute("updated_in_run", 1);
     E_y.setAttribute("updated_in_run", 1);
@@ -6656,7 +6655,7 @@ void groupbased_read_write(std::string const &ext)
 
     data = 2;
 
-    E_x.storeChunk(shareRaw(&data), {0}, {1});
+    E_x.storeChunkRaw(&data, {0}, {1});
     E_x.setAttribute("updated_in_run", 2);
 }
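Taken together, the test changes above reduce to a round trip like the following (illustrative sketch, not part of the patch; file name and mesh paths are chosen for the example):

```cpp
#include <openPMD/openPMD.hpp>

int main()
{
    using namespace openPMD;
    {
        Series write("data.h5", Access::CREATE);
        auto E_x = write.iterations[0].meshes["E"]["x"];
        E_x.resetDataset(Dataset(Datatype::DOUBLE, {1}));

        double value = 42.0;
        E_x.storeChunkRaw(&value, {0}, {1});
        write.flush(); // `value` must stay alive until this flush point
    }
    {
        Series read("data.h5", Access::READ_ONLY);
        double value = 0.0;
        read.iterations[0].meshes["E"]["x"].loadChunkRaw(&value, {0}, {1});
        read.flush(); // `value` holds the stored data only after the flush
    }
    return 0;
}
```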