diff --git a/testing/adios2/bindings/C/TestBPAvailableVariablesAttribites.cpp b/testing/adios2/bindings/C/TestBPAvailableVariablesAttribites.cpp
index c4ad9f62bd..8399de3c50 100644
--- a/testing/adios2/bindings/C/TestBPAvailableVariablesAttribites.cpp
+++ b/testing/adios2/bindings/C/TestBPAvailableVariablesAttribites.cpp
@@ -99,8 +99,11 @@ TEST_F(BPAvailableVariablesAttributes, AvailableVariablesAttributes)
     adios2_variable *varR64 = adios2_define_variable(
         ioH, "varR64", adios2_type_double, 1, shape, start, count,
         adios2_constant_dims_false);
+#if ADIOS2_USE_MPI
+    adios2_engine *engineH = adios2_open(ioH, "available_MPI.bp", adios2_mode_write);
+#else
     adios2_engine *engineH = adios2_open(ioH, "available.bp", adios2_mode_write);
-
+#endif
     for (size_t i = 0; i < steps; ++i)
     {
         adios2_begin_step(engineH, adios2_step_mode_append, -1., &status);
@@ -177,8 +180,11 @@ TEST_F(BPAvailableVariablesAttributes, AvailableVariablesAttributes)
     std::vector<double> inR64(data_Nx / 2);
 
     adios2_io *ioH = adios2_declare_io(adiosH, "Reader");
+#if ADIOS2_USE_MPI
+    adios2_engine *engineH = adios2_open(ioH, "available_MPI.bp", adios2_mode_read);
+#else
     adios2_engine *engineH = adios2_open(ioH, "available.bp", adios2_mode_read);
-
+#endif
     size_t nsteps;
     adios2_steps(&nsteps, engineH);
     EXPECT_EQ(nsteps, steps);
diff --git a/testing/adios2/bindings/C/TestBPMemorySpace.cpp b/testing/adios2/bindings/C/TestBPMemorySpace.cpp
index 65c2381969..d947ccad2d 100644
--- a/testing/adios2/bindings/C/TestBPMemorySpace.cpp
+++ b/testing/adios2/bindings/C/TestBPMemorySpace.cpp
@@ -52,7 +52,11 @@ TEST_F(ADIOS2_C_API, ADIOS2BPMemorySpaceGPU)
 
 TEST_F(ADIOS2_C_API, ADIOS2BPMemorySpaceShape)
 {
+#if ADIOS2_USE_MPI
+    const char fname[] = "ADIOS2_C_API.ADIOS2BPMemorySpace_MPI.bp";
+#else
     const char fname[] = "ADIOS2_C_API.ADIOS2BPMemorySpace.bp";
+#endif
     // write
     {
         adios2_io *ioH = adios2_declare_io(adiosH, "CMemSpace");
diff --git a/testing/adios2/bindings/C/TestBPWriteAggregateReadLocal.cpp b/testing/adios2/bindings/C/TestBPWriteAggregateReadLocal.cpp
index 941d70e5cb..e8d616405e 100644
--- a/testing/adios2/bindings/C/TestBPWriteAggregateReadLocal.cpp
+++ b/testing/adios2/bindings/C/TestBPWriteAggregateReadLocal.cpp
@@ -23,8 +23,11 @@ void LocalAggregate1D(const std::string substreams)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
+#if ADIOS2_USE_MPI
+    const std::string fname("LocalAggregate1D_" + substreams + "_MPI.bp");
+#else
     const std::string fname("LocalAggregate1D_" + substreams + ".bp");
-
+#endif
     int mpiRank = 0, mpiSize = 1;
     // Number of steps
     constexpr size_t NSteps = 5;
@@ -151,8 +154,11 @@ void LocalAggregate1DBlock0(const std::string substreams)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
+#if ADIOS2_USE_MPI
+    const std::string fname("LocalAggregate1DSubFile_" + substreams + "_MPI.bp");
+#else
     const std::string fname("LocalAggregate1DSubFile_" + substreams + ".bp");
-
+#endif
     int mpiRank = 0, mpiSize = 1;
     // Number of steps
     constexpr size_t NSteps = 5;
diff --git a/testing/adios2/bindings/C/TestBPWriteReadMultiblock.cpp b/testing/adios2/bindings/C/TestBPWriteReadMultiblock.cpp
index 439790103b..4f34f971f2 100644
--- a/testing/adios2/bindings/C/TestBPWriteReadMultiblock.cpp
+++ b/testing/adios2/bindings/C/TestBPWriteReadMultiblock.cpp
@@ -94,7 +94,11 @@ TEST_F(BPWriteReadMultiblockCC, ZeroSizeBlocks)
     adios2_variable *varR64 = adios2_define_variable(
         ioH, "varR64", adios2_type_double, 1, shape, start, count,
         adios2_constant_dims_false);
+#if ADIOS2_USE_MPI
+    adios2_engine *engineH = adios2_open(ioH, "cmblocks_MPI.bp", adios2_mode_write);
+#else
     adios2_engine *engineH = adios2_open(ioH, "cmblocks.bp", adios2_mode_write);
+#endif
 
     for (size_t i = 0; i < steps; ++i)
     {
@@ -172,8 +176,11 @@ TEST_F(BPWriteReadMultiblockCC, ZeroSizeBlocks)
     std::vector<double> inR64(data_Nx / 2);
 
     adios2_io *ioH = adios2_declare_io(adiosH, "Reader");
+#if ADIOS2_USE_MPI
+    adios2_engine *engineH = adios2_open(ioH, "cmblocks_MPI.bp", adios2_mode_read);
+#else
     adios2_engine *engineH = adios2_open(ioH, "cmblocks.bp", adios2_mode_read);
-
+#endif
     size_t nsteps;
     adios2_steps(&nsteps, engineH);
     EXPECT_EQ(nsteps, steps);
diff --git a/testing/adios2/bindings/C/TestBPWriteTypes.cpp b/testing/adios2/bindings/C/TestBPWriteTypes.cpp
index 94b1214b51..c3b6e4b872 100644
--- a/testing/adios2/bindings/C/TestBPWriteTypes.cpp
+++ b/testing/adios2/bindings/C/TestBPWriteTypes.cpp
@@ -45,7 +45,11 @@ class ADIOS2_C_API : public ::testing::Test
 
 TEST_F(ADIOS2_C_API, ADIOS2BPWriteTypes)
 {
+#if ADIOS2_USE_MPI
+    const char fname[] = "ADIOS2_C_API.ADIOS2BPWriteTypes_MPI.bp";
+#else
     const char fname[] = "ADIOS2_C_API.ADIOS2BPWriteTypes.bp";
+#endif
     // write
     {
         // IO
@@ -408,7 +412,11 @@ std::string adios2_engine_name_as_string(adios2_engine *engineH)
 
 TEST_F(ADIOS2_C_API_IO, Engine)
 {
+#if ADIOS2_USE_MPI
+    const char fname[] = "ADIOS2_C_API_IO.engine_MPI.bp";
+#else
     const char fname[] = "ADIOS2_C_API_IO.engine.bp";
+#endif
     int ierr;
     ierr = adios2_set_engine(ioH, "bpfile");
diff --git a/testing/adios2/bindings/C/TestNullWriteRead.cpp b/testing/adios2/bindings/C/TestNullWriteRead.cpp
index 3ea6322da1..dcf9d0708e 100644
--- a/testing/adios2/bindings/C/TestNullWriteRead.cpp
+++ b/testing/adios2/bindings/C/TestNullWriteRead.cpp
@@ -27,7 +27,6 @@ TEST_F(NullWriteReadTests_C_API, NullWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("NullWriteRead1D8_c.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -45,8 +44,10 @@ TEST_F(NullWriteReadTests_C_API, NullWriteRead1D8)
 
 #if ADIOS2_USE_MPI
     adios2_adios *adios = adios2_init_mpi(MPI_COMM_WORLD);
+    const std::string fname("NullWriteRead1D8_c_MPI.bp");
 #else
     adios2_adios *adios = adios2_init_serial();
+    const std::string fname("NullWriteRead1D8_c.bp");
 #endif
     {
         adios2_io *io = adios2_declare_io(adios, "WriteNull");
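Every hunk in this patch applies the same idiom: the output file name is chosen at preprocessing time, so the MPI build of a test writes a `*_MPI.bp` target and cannot collide with the serial build's output when both suites run in the same directory. A minimal stand-alone sketch of the idiom follows (the file and IO names here are illustrative, not taken from the patch; `ADIOS2_USE_MPI` is assumed to come from the build configuration, as in the tests above):

    #include <string>
    #include <adios2.h>
    #if ADIOS2_USE_MPI
    #include <mpi.h>
    #endif

    int main(int argc, char **argv)
    {
    #if ADIOS2_USE_MPI
        MPI_Init(&argc, &argv);
        // MPI build: distinct file name plus a communicator-aware ADIOS object,
        // so serial and MPI runs never write the same .bp directory.
        const std::string fname("Example_MPI.bp");
        adios2::ADIOS adios(MPI_COMM_WORLD);
    #else
        (void)argc;
        (void)argv;
        // Serial build keeps the historical file name.
        const std::string fname("Example.bp");
        adios2::ADIOS adios;
    #endif
        adios2::IO io = adios.DeclareIO("ExampleIO");
        adios2::Engine writer = io.Open(fname, adios2::Mode::Write);
        writer.Close();
    #if ADIOS2_USE_MPI
        MPI_Finalize();
    #endif
        return 0;
    }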
diff --git a/testing/adios2/bindings/fortran/TestBPWriteTypes.F90 b/testing/adios2/bindings/fortran/TestBPWriteTypes.F90
index 2145baedf8..d5086631ff 100644
--- a/testing/adios2/bindings/fortran/TestBPWriteTypes.F90
+++ b/testing/adios2/bindings/fortran/TestBPWriteTypes.F90
@@ -240,13 +240,20 @@ program TestBPWriteTypes
        stop 1
     end if
 
+#if ADIOS2_USE_MPI
+    call adios2_open(bpWriter, ioWrite, "ftypes_mpi.bp", adios2_mode_write, ierr)
+#else
     call adios2_open(bpWriter, ioWrite, "ftypes.bp", adios2_mode_write, ierr)
-
+#endif
     if( bpWriter%valid .eqv. .false. ) then
        write(*,*) 'Invalid adios2_engine post-open'
        stop 1
     end if
+#if ADIOS2_USE_MPI
+    if( TRIM(bpWriter%name) /= "ftypes_mpi.bp") then
+#else
     if( TRIM(bpWriter%name) /= "ftypes.bp") then
+#endif
        write(*,*) 'Invalid adios2_engine name'
        stop 1
     end if
@@ -307,8 +314,11 @@ program TestBPWriteTypes
     ! Declare io reader
     call adios2_declare_io(ioRead, adios, "ioRead", ierr)
     ! Open bpReader engine
+#if ADIOS2_USE_MPI
+    call adios2_open(bpReader, ioRead, "ftypes_mpi.bp", adios2_mode_readRandomAccess, ierr)
+#else
     call adios2_open(bpReader, ioRead, "ftypes.bp", adios2_mode_readRandomAccess, ierr)
-
+#endif
     call adios2_steps(nsteps, bpReader, ierr)
     if(nsteps /= 3) then
        write(*,*) 'ftypes.bp must have 3 steps'
diff --git a/testing/adios2/engine/bp/TestBPAccuracyDefaults.cpp b/testing/adios2/engine/bp/TestBPAccuracyDefaults.cpp
index 96eb8e61d1..ea8562ea27 100644
--- a/testing/adios2/engine/bp/TestBPAccuracyDefaults.cpp
+++ b/testing/adios2/engine/bp/TestBPAccuracyDefaults.cpp
@@ -31,7 +31,6 @@ class AccuracyTests : public ::testing::Test
 
 // Check if SetAccuracy/GetAccuracy default behavior works
 TEST_F(AccuracyTests, DefaultAccuracy)
 {
-    const std::string fname("DefaultAccuracy.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
@@ -40,6 +39,9 @@ TEST_F(AccuracyTests, DefaultAccuracy)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("DefaultAccuracy_MPI.bp");
+#else
+    const std::string fname("DefaultAccuracy.bp");
 #endif
 
     std::vector<double> localData(Nx);
diff --git a/testing/adios2/engine/bp/TestBPBufferSize.cpp b/testing/adios2/engine/bp/TestBPBufferSize.cpp
index 82764552f5..9127a22f53 100644
--- a/testing/adios2/engine/bp/TestBPBufferSize.cpp
+++ b/testing/adios2/engine/bp/TestBPBufferSize.cpp
@@ -79,9 +79,6 @@ size_t GetAndPrintBufferSize(adios2::Engine &engine, const std::string &info,
 
 // Put(Sync) and Put(Deferred) should have the same buffer consumption
 TEST_F(BPBufferSizeTest, SyncDeferredIdenticalUsage)
 {
-    std::string fnameSync = "ADIOS2BPBufferSizeSync.bp";
-    std::string fnameDeferred = "ADIOS2BPBufferSizeDeferred.bp";
-    std::string fnameDeferredPP = "ADIOS2BPBufferSizeDeferredPP.bp";
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
     const std::size_t Nx = 10485760; // 10M elements, 80MB variable
@@ -94,6 +91,14 @@ TEST_F(BPBufferSizeTest, SyncDeferredIdenticalUsage)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    std::string fnameSync = "ADIOS2BPBufferSizeSync_MPI.bp";
+    std::string fnameDeferred = "ADIOS2BPBufferSizeDeferred_MPI.bp";
+    std::string fnameDeferredPP = "ADIOS2BPBufferSizeDeferredPP_MPI.bp";
+#else
+    std::string fnameSync = "ADIOS2BPBufferSizeSync.bp";
+    std::string fnameDeferred = "ADIOS2BPBufferSizeDeferred.bp";
+    std::string fnameDeferredPP = "ADIOS2BPBufferSizeDeferredPP.bp";
+
 #endif
 
     // Write test data using BP
diff --git a/testing/adios2/engine/bp/TestBPChangingShape.cpp b/testing/adios2/engine/bp/TestBPChangingShape.cpp
index 9beceadb8b..6fdadb6248 100644
--- a/testing/adios2/engine/bp/TestBPChangingShape.cpp
+++ b/testing/adios2/engine/bp/TestBPChangingShape.cpp
@@ -31,15 +31,16 @@ TEST_F(BPChangingShape, BPWriteReadShape2D)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("BPChangingShape.bp");
 
     const int nsteps = 10;
     int rank = 0, nproc = 1;
 
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+    const std::string fname("BPChangingShape_MPI.bp");
     adios2::ADIOS adios(MPI_COMM_WORLD);
 #else
+    const std::string fname("BPChangingShape.bp");
     adios2::ADIOS adios;
 #endif
     // Writer
diff --git a/testing/adios2/engine/bp/TestBPChangingShapeWithinStep.cpp b/testing/adios2/engine/bp/TestBPChangingShapeWithinStep.cpp
index b8392cc92f..1ee438d0cb 100644
--- a/testing/adios2/engine/bp/TestBPChangingShapeWithinStep.cpp
+++ b/testing/adios2/engine/bp/TestBPChangingShapeWithinStep.cpp
@@ -38,7 +38,6 @@ TEST_P(BPChangingShapeWithinStep, MultiBlock)
     auto params = std::get<1>(GetParam());
     double epsilon = std::get<2>(GetParam());
 
-    const std::string fname("BPChangingShapeMultiblock_" + operatorName + ".bp");
     const int nsteps = 2;
     const std::vector<int> nblocks = {2, 3};
     const int N = 16384; // size of one block (should be big enough to compress)
@@ -48,8 +47,10 @@ TEST_P(BPChangingShapeWithinStep, MultiBlock)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+    const std::string fname("BPChangingShapeMultiblock_" + operatorName + "_MPI.bp");
     adios2::ADIOS adios(MPI_COMM_WORLD);
 #else
+    const std::string fname("BPChangingShapeMultiblock_" + operatorName + ".bp");
     adios2::ADIOS adios;
 #endif
diff --git a/testing/adios2/engine/bp/TestBPDirectIO.cpp b/testing/adios2/engine/bp/TestBPDirectIO.cpp
index 67e3884b5c..a0edb30d89 100644
--- a/testing/adios2/engine/bp/TestBPDirectIO.cpp
+++ b/testing/adios2/engine/bp/TestBPDirectIO.cpp
@@ -27,13 +27,15 @@ TEST_F(ADIOSReadDirectIOTest, BufferResize)
        and the last chunck is resized back. It should be properly aligned
        to not cause any problems at writing that chunk.
     */
-    std::string filename = "ADIOSDirectIO.bp";
 
     int mpiRank = 0, mpiSize = 1;
 
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    std::string filename = "ADIOSDirectIO_MPI.bp";
+#else
+    std::string filename = "ADIOSDirectIO.bp";
 #endif
 
     // Write test data using BP
diff --git a/testing/adios2/engine/bp/TestBPFStreamWriteReadHighLevelAPI.cpp b/testing/adios2/engine/bp/TestBPFStreamWriteReadHighLevelAPI.cpp
index f3c3e2b6b2..6a6c71adb8 100644
--- a/testing/adios2/engine/bp/TestBPFStreamWriteReadHighLevelAPI.cpp
+++ b/testing/adios2/engine/bp/TestBPFStreamWriteReadHighLevelAPI.cpp
@@ -32,7 +32,6 @@ TEST_F(StreamWriteReadHighLevelAPI, ADIOS2BPWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteRead1D8_hl.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -44,6 +43,9 @@ TEST_F(StreamWriteReadHighLevelAPI, ADIOS2BPWriteRead1D8)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead1D8_hl_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead1D8_hl.bp");
 #endif
 
     // write test data using BP
@@ -414,7 +416,6 @@ TEST_F(StreamWriteReadHighLevelAPI, ADIOS2BPwriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2BPwriteRead2D2x4Test_hl.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -429,6 +430,9 @@ TEST_F(StreamWriteReadHighLevelAPI, ADIOS2BPwriteRead2D2x4)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPwriteRead2D2x4Test_hl_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPwriteRead2D2x4Test_hl.bp");
 #endif
 
     // write test data using ADIOS2
@@ -534,7 +538,6 @@ TEST_F(StreamWriteReadHighLevelAPI, ADIOS2BPwriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPwriteRead2D4x2Test_hl.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -548,6 +551,9 @@ TEST_F(StreamWriteReadHighLevelAPI, ADIOS2BPwriteRead2D4x2)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPwriteRead2D4x2Test_hl_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPwriteRead2D4x2Test_hl.bp");
 #endif
 
     // write test data using ADIOS2
@@ -652,16 +658,18 @@ TEST_F(StreamWriteReadHighLevelAPI, DoubleOpenException)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BP_hl_exception.bp");
 
     {
 #if ADIOS2_USE_MPI
+        const std::string fname("ADIOS2BP_hl_exception_MPI.bp");
         adios2::fstream oStream(fname, adios2::fstream::out, MPI_COMM_WORLD, engineName);
         EXPECT_THROW(oStream.open("second", adios2::fstream::out, MPI_COMM_WORLD, engineName),
                      std::invalid_argument);
 #else
+        const std::string fname("ADIOS2BP_hl_exception.bp");
+
         adios2::fstream oStream(fname, adios2::fstream::out);
         EXPECT_THROW(oStream.open("second", adios2::fstream::out, engineName),
                      std::invalid_argument);
diff --git a/testing/adios2/engine/bp/TestBPFortranToCppReader.cpp b/testing/adios2/engine/bp/TestBPFortranToCppReader.cpp
index ec81cd9e0b..f79ed890cb 100644
--- a/testing/adios2/engine/bp/TestBPFortranToCppReader.cpp
+++ b/testing/adios2/engine/bp/TestBPFortranToCppReader.cpp
@@ -27,13 +27,15 @@ class BPFortranToCppRead : public ::testing::Test
 
 TEST_F(BPFortranToCppRead, ADIOS2BPFortranToCppRead)
 {
-    const std::string fname("FortranToCpp.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("FortranToCpp_MPI.bp");
+#else
+    const std::string fname("FortranToCpp.bp");
 #endif
 
 #if ADIOS2_USE_MPI
diff --git a/testing/adios2/engine/bp/TestBPFortranToCppWriter.F90 b/testing/adios2/engine/bp/TestBPFortranToCppWriter.F90
index 2fc0a46e9c..0c5de0df8e 100644
--- a/testing/adios2/engine/bp/TestBPFortranToCppWriter.F90
+++ b/testing/adios2/engine/bp/TestBPFortranToCppWriter.F90
@@ -76,8 +76,13 @@ end function iargc
 
   if (irank == 0) print *,"engine type :",trim(engine_type)
 
+#if ADIOS2_USE_MPI
+  call adios2_open(bpWriter, ioWrite, "FortranToCpp_MPI.bp", &
+                   adios2_mode_write, ierr)
+#else
   call adios2_open(bpWriter, ioWrite, "FortranToCpp.bp", &
                    adios2_mode_write, ierr)
+#endif
 
   do s = 1, 3
     call adios2_begin_step(bpWriter, ierr)
diff --git a/testing/adios2/engine/bp/TestBPInquireDefine.cpp b/testing/adios2/engine/bp/TestBPInquireDefine.cpp
index 05c827ef47..47c12df9f1 100644
--- a/testing/adios2/engine/bp/TestBPInquireDefine.cpp
+++ b/testing/adios2/engine/bp/TestBPInquireDefine.cpp
@@ -24,7 +24,6 @@ class ADIOSInquireDefineTest : public ::testing::Test
 
 TEST_F(ADIOSInquireDefineTest, Read)
 {
-    std::string filename = "ADIOSInquireDefine.bp";
 
     // Number of steps
     const int32_t NSteps = 5;
     int mpiRank = 0, mpiSize = 1;
@@ -33,6 +32,9 @@ TEST_F(ADIOSInquireDefineTest, Read)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    std::string filename = "ADIOSInquireDefine_MPI.bp";
+#else
+    std::string filename = "ADIOSInquireDefine.bp";
 #endif
 
     // Write test data using BP
diff --git a/testing/adios2/engine/bp/TestBPInquireVariableException.cpp b/testing/adios2/engine/bp/TestBPInquireVariableException.cpp
index c0d251f651..f798dae94d 100644
--- a/testing/adios2/engine/bp/TestBPInquireVariableException.cpp
+++ b/testing/adios2/engine/bp/TestBPInquireVariableException.cpp
@@ -18,7 +18,6 @@ class ADIOSInquireVariableException : public ::testing::Test
 
 TEST_F(ADIOSInquireVariableException, Read)
 {
-    std::string filename = "ADIOSInquireVariableException";
 
     // Number of steps
     const std::size_t NSteps = 5;
@@ -28,6 +27,9 @@ TEST_F(ADIOSInquireVariableException, Read)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
+    std::string filename = "ADIOSInquireVariableException_MPI";
+#else
+    std::string filename = "ADIOSInquireVariableException";
 #endif
 
     // Write test data using BP
diff --git a/testing/adios2/engine/bp/TestBPLargeMetadata.cpp b/testing/adios2/engine/bp/TestBPLargeMetadata.cpp
index e190d40c71..598ea2ecc9 100644
--- a/testing/adios2/engine/bp/TestBPLargeMetadata.cpp
+++ b/testing/adios2/engine/bp/TestBPLargeMetadata.cpp
@@ -34,7 +34,6 @@ TEST_F(BPLargeMetadata, BPWrite1D_LargeMetadata)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("BPWrite1D_LargeMetadata.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
@@ -45,6 +44,9 @@ TEST_F(BPLargeMetadata, BPWrite1D_LargeMetadata)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWrite1D_LargeMetadata_MPI.bp");
+#else
+    const std::string fname("BPWrite1D_LargeMetadata.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -99,7 +101,6 @@ TEST_F(BPLargeMetadata, BPWrite1D_LargeMetadata)
 
 TEST_F(BPLargeMetadata, ManyLongStrings)
 {
-    const std::string fname("BPWrite1D_LargeMetadataStrings.bp");
     const std::string longString =
         "test_string "
         "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
         "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
@@ -110,8 +111,10 @@ TEST_F(BPLargeMetadata, ManyLongStrings)
 
 #if ADIOS2_USE_MPI
     adios2::ADIOS adios(MPI_COMM_WORLD);
+    const std::string fname("BPWrite1D_LargeMetadataStrings_MPI.bp");
 #else
     adios2::ADIOS adios;
+    const std::string fname("BPWrite1D_LargeMetadataStrings.bp");
 #endif
 
     adios2::IO io = adios.DeclareIO("myIO");
diff --git a/testing/adios2/engine/bp/TestBPTimeAggregation.cpp b/testing/adios2/engine/bp/TestBPTimeAggregation.cpp
index e567b6ff1f..1d65e0569c 100644
--- a/testing/adios2/engine/bp/TestBPTimeAggregation.cpp
+++ b/testing/adios2/engine/bp/TestBPTimeAggregation.cpp
@@ -21,7 +21,6 @@ void TimeAggregation1D8(const std::string flushstepscount)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname = "BPTimeAggregation1D8_" + flushstepscount + ".bp";
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -33,6 +32,9 @@ void TimeAggregation1D8(const std::string flushstepscount)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname = "BPTimeAggregation1D8_" + flushstepscount + "_MPI.bp";
+#else
+    const std::string fname = "BPTimeAggregation1D8_" + flushstepscount + ".bp";
 #endif
 
     // Write test data using ADIOS2
@@ -343,7 +345,6 @@ void TimeAggregation2D4x2(const std::string flushstepscount)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname = "BPTimeAggregation2D2x4_" + flushstepscount + ".bp";
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -358,6 +359,9 @@ void TimeAggregation2D4x2(const std::string flushstepscount)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname = "BPTimeAggregation2D2x4_" + flushstepscount + "_MPI.bp";
"_MPI.bp"; +#else + const std::string fname = "BPTimeAggregation2D2x4_" + flushstepscount + ".bp"; #endif // Write test data using ADIOS2 diff --git a/testing/adios2/engine/bp/TestBPWriteAppendReadADIOS2.cpp b/testing/adios2/engine/bp/TestBPWriteAppendReadADIOS2.cpp index d6bf3776fa..6d3a9d0381 100644 --- a/testing/adios2/engine/bp/TestBPWriteAppendReadADIOS2.cpp +++ b/testing/adios2/engine/bp/TestBPWriteAppendReadADIOS2.cpp @@ -34,7 +34,6 @@ TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendRead2D2x4) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here - const std::string fname("ADIOS2BPWriteAppendRead2D2x4Test.bp"); const std::string zero = std::to_string(0); const std::string s1_Single = std::string("s1_Single_") + zero; @@ -70,6 +69,9 @@ TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendRead2D2x4) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteAppendRead2D2x4Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteAppendRead2D2x4Test.bp"); #endif // Write test data using ADIOS2 @@ -643,7 +645,6 @@ TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendRead2D2x4) // Write with append combined with aggregation, same aggregation ratio TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendReadAggregate) { - const std::string fname("ADIOS2BPWriteAppendReadAggregate.bp"); int mpiRank = 0, mpiSize = 1; const std::size_t Nx = 4; const std::size_t Ny = 2; @@ -652,8 +653,10 @@ TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendReadAggregate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteAppendReadAggregate_MPI.bp"); adios2::ADIOS adios(MPI_COMM_WORLD); #else + const std::string fname("ADIOS2BPWriteAppendReadAggregate.bp"); adios2::ADIOS adios; #endif { @@ -741,7 +744,6 @@ TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendReadAggregate) // Write with append combined with aggregation, same aggregation ratio TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendReadVaryingAggregation) { - const std::string fname("ADIOS2BPWriteAppendReadVaryingAggregate.bp"); int mpiRank = 0, mpiSize = 1; const std::size_t Nx = 4; const std::size_t Ny = 2; @@ -750,8 +752,10 @@ TEST_F(BPWriteAppendReadTestADIOS2, ADIOS2BPWriteAppendReadVaryingAggregation) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteAppendReadVaryingAggregate_MPI.bp"); adios2::ADIOS adios(MPI_COMM_WORLD); #else + const std::string fname("ADIOS2BPWriteAppendReadVaryingAggregate.bp"); adios2::ADIOS adios; #endif { diff --git a/testing/adios2/engine/bp/TestBPWriteFlushRead.cpp b/testing/adios2/engine/bp/TestBPWriteFlushRead.cpp index 0a769fff0b..3d84552918 100644 --- a/testing/adios2/engine/bp/TestBPWriteFlushRead.cpp +++ b/testing/adios2/engine/bp/TestBPWriteFlushRead.cpp @@ -111,9 +111,13 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2D) io2D.DefineVariable("r64", shape, start, count, adios2::ConstantDims); } +#if ADIOS2_USE_MPI + adios2::Engine bpWriter1D = io1D.Open("Flush1D_MPI.bp", adios2::Mode::Write); + adios2::Engine bpWriter2D = io2D.Open("Flush2D_MPI.bp", adios2::Mode::Write); +#else adios2::Engine bpWriter1D = io1D.Open("Flush1D.bp", adios2::Mode::Write); adios2::Engine bpWriter2D = io2D.Open("Flush2D.bp", adios2::Mode::Write); - +#endif for (size_t step = 0; 
step < NSteps / 2; ++step) { SmallTestData currentTestData = @@ -160,8 +164,11 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2D) io.SetEngine(engineName); } +#if ADIOS2_USE_MPI + adios2::Engine bpReader = io.Open("Flush1D_MPI.bp", adios2::Mode::Read); +#else adios2::Engine bpReader = io.Open("Flush1D.bp", adios2::Mode::Read); - +#endif unsigned int t = 0; while (bpReader.BeginStep(adios2::StepMode::Read, 0.0) == adios2::StepStatus::OK) @@ -312,8 +319,11 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2D) io.SetEngine(engineName); } +#if ADIOS2_USE_MPI + adios2::Engine bpReader = io.Open("Flush2D_MPI.bp", adios2::Mode::Read); +#else adios2::Engine bpReader = io.Open("Flush2D.bp", adios2::Mode::Read); - +#endif unsigned int t = 0; while (bpReader.BeginStep(adios2::StepMode::Read, 0.0) == adios2::StepStatus::OK) @@ -553,8 +563,13 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2Dstdio) io2D.DefineVariable("r64", shape, start, count, adios2::ConstantDims); } +#if ADIOS2_USE_MPI + adios2::Engine bpWriter1D = io1D.Open("Flush1Dstdio_MPI.bp", adios2::Mode::Write); + adios2::Engine bpWriter2D = io2D.Open("Flush2Dstdio_MPI.bp", adios2::Mode::Write); +#else adios2::Engine bpWriter1D = io1D.Open("Flush1Dstdio.bp", adios2::Mode::Write); adios2::Engine bpWriter2D = io2D.Open("Flush2Dstdio.bp", adios2::Mode::Write); +#endif for (size_t step = 0; step < NSteps / 2; ++step) { @@ -602,8 +617,11 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2Dstdio) io.SetEngine(engineName); } +#if ADIOS2_USE_MPI + adios2::Engine bpReader = io.Open("Flush1Dstdio_MPI.bp", adios2::Mode::Read); +#else adios2::Engine bpReader = io.Open("Flush1Dstdio.bp", adios2::Mode::Read); - +#endif unsigned int t = 0; while (bpReader.BeginStep(adios2::StepMode::Read, 0.0) == adios2::StepStatus::OK) @@ -754,7 +772,11 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2Dstdio) io.SetEngine(engineName); } +#if ADIOS2_USE_MPI + adios2::Engine bpReader = io.Open("Flush2Dstdio_MPI.bp", adios2::Mode::Read); +#else adios2::Engine bpReader = io.Open("Flush2Dstdio.bp", adios2::Mode::Read); +#endif unsigned int t = 0; @@ -995,8 +1017,13 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2Dfstream) io2D.DefineVariable("r64", shape, start, count, adios2::ConstantDims); } +#if ADIOS2_USE_MPI + adios2::Engine bpWriter1D = io1D.Open("Flush1Dfstream_MPI.bp", adios2::Mode::Write); + adios2::Engine bpWriter2D = io2D.Open("Flush2Dfstream_MPI.bp", adios2::Mode::Write); +#else adios2::Engine bpWriter1D = io1D.Open("Flush1Dfstream.bp", adios2::Mode::Write); adios2::Engine bpWriter2D = io2D.Open("Flush2Dfstream.bp", adios2::Mode::Write); +#endif for (size_t step = 0; step < NSteps / 2; ++step) { @@ -1044,7 +1071,11 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2Dfstream) io.SetEngine(engineName); } +#if ADIOS2_USE_MPI + adios2::Engine bpReader = io.Open("Flush1Dfstream_MPI.bp", adios2::Mode::Read); +#else adios2::Engine bpReader = io.Open("Flush1Dfstream.bp", adios2::Mode::Read); +#endif unsigned int t = 0; @@ -1196,7 +1227,11 @@ TEST_F(BPWriteFlushRead, ADIOS2BPWrite1D2Dfstream) io.SetEngine(engineName); } +#if ADIOS2_USE_MPI + adios2::Engine bpReader = io.Open("Flush2Dfstream_MPI.bp", adios2::Mode::Read); +#else adios2::Engine bpReader = io.Open("Flush2Dfstream.bp", adios2::Mode::Read); +#endif unsigned int t = 0; diff --git a/testing/adios2/engine/bp/TestBPWriteMemorySelectionRead.cpp b/testing/adios2/engine/bp/TestBPWriteMemorySelectionRead.cpp index aa8d516b21..fa70fd610e 100644 --- a/testing/adios2/engine/bp/TestBPWriteMemorySelectionRead.cpp +++ 
b/testing/adios2/engine/bp/TestBPWriteMemorySelectionRead.cpp @@ -174,7 +174,6 @@ MPI_Comm testComm; void BPSteps1D(const size_t ghostCells) { - const std::string fname("BPSteps1D_" + std::to_string(ghostCells)); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -186,6 +185,9 @@ void BPSteps1D(const size_t ghostCells) #if ADIOS2_USE_MPI MPI_Comm_rank(testComm, &mpiRank); MPI_Comm_size(testComm, &mpiSize); + const std::string fname("BPSteps1D_" + std::to_string(ghostCells) + "_MPI"); +#else + const std::string fname("BPSteps1D_" + std::to_string(ghostCells)); #endif #if ADIOS2_USE_MPI @@ -386,7 +388,6 @@ void BPSteps1D(const size_t ghostCells) void BPSteps2D4x2(const size_t ghostCells) { - const std::string fname("BPSteps2D4x2_" + std::to_string(ghostCells)); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -402,6 +403,9 @@ void BPSteps2D4x2(const size_t ghostCells) #if ADIOS2_USE_MPI MPI_Comm_rank(testComm, &mpiRank); MPI_Comm_size(testComm, &mpiSize); + const std::string fname("BPSteps2D4x2_" + std::to_string(ghostCells) + "_MPI"); +#else + const std::string fname("BPSteps2D4x2_" + std::to_string(ghostCells)); #endif #if ADIOS2_USE_MPI @@ -612,7 +616,6 @@ void BPSteps2D4x2(const size_t ghostCells) void BPSteps3D8x2x4(const size_t ghostCells) { - const std::string fname("BPSteps3D8x2x4_" + std::to_string(ghostCells)); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -630,6 +633,9 @@ void BPSteps3D8x2x4(const size_t ghostCells) #if ADIOS2_USE_MPI MPI_Comm_rank(testComm, &mpiRank); MPI_Comm_size(testComm, &mpiSize); + const std::string fname("BPSteps3D8x2x4_" + std::to_string(ghostCells) + "_MPI"); +#else + const std::string fname("BPSteps3D8x2x4_" + std::to_string(ghostCells)); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/TestBPWriteMultiblockRead.cpp b/testing/adios2/engine/bp/TestBPWriteMultiblockRead.cpp index 4edc0b8566..79b325bdfc 100644 --- a/testing/adios2/engine/bp/TestBPWriteMultiblockRead.cpp +++ b/testing/adios2/engine/bp/TestBPWriteMultiblockRead.cpp @@ -32,7 +32,6 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteMultiblockRead1D8) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteMultiblockRead1D8.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -44,6 +43,9 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteMultiblockRead1D8) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteMultiblockRead1D8_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteMultiblockRead1D8.bp"); #endif // Write test data using BP @@ -369,7 +371,6 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteMultiblockRead2D2x4) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here - const std::string fname("ADIOS2BPWriteMultiblockRead2D2x4Test.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -384,6 +385,9 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteMultiblockRead2D2x4) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteMultiblockRead2D2x4Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteMultiblockRead2D2x4Test.bp"); #endif // Write test data using ADIOS2 @@ -711,7 +715,6 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteMultiblockRead2D4x2) { // Each process would write a 4x2 array and all processes would // form a 2D 
4 * (NumberOfProcess * Nx) matrix where Nx is 2 here - const std::string fname("ADIOS2BPWriteMultiblockRead2D4x2Test.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -725,6 +728,9 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteMultiblockRead2D4x2) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteMultiblockRead2D4x2Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteMultiblockRead2D4x2Test.bp"); #endif // Write test data using ADIOS2 @@ -1039,7 +1045,6 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteRead1D8ZeroBlock) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteRead1DZeroBlock.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1051,6 +1056,9 @@ TEST_F(BPWriteMultiblockReadTest, ADIOS2BPWriteRead1D8ZeroBlock) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteRead1DZeroBlock_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteRead1DZeroBlock.bp"); #endif // Write test data using BP diff --git a/testing/adios2/engine/bp/TestBPWriteNull.cpp b/testing/adios2/engine/bp/TestBPWriteNull.cpp index 43f16a59ac..ab20533216 100644 --- a/testing/adios2/engine/bp/TestBPWriteNull.cpp +++ b/testing/adios2/engine/bp/TestBPWriteNull.cpp @@ -33,7 +33,6 @@ TEST_F(BPWriteNullTest, BPWrite1D1x8) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteNull.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -45,6 +44,9 @@ TEST_F(BPWriteNullTest, BPWrite1D1x8) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteNull_MPI.bp"); +#else + const std::string fname("BPWriteNull.bp"); #endif // Write test data using BP @@ -317,7 +319,6 @@ TEST_F(BPWriteNullTest, BPWrite2D4x2) { // Each process would write a 4x2 array and all processes would // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here - const std::string fname("BPWrite2D4x2TestNull.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -331,6 +332,9 @@ TEST_F(BPWriteNullTest, BPWrite2D4x2) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWrite2D4x2TestNull_MPI.bp"); +#else + const std::string fname("BPWrite2D4x2TestNull.bp"); #endif // Write test data using ADIOS2 diff --git a/testing/adios2/engine/bp/TestBPWriteProfilingJSON.cpp b/testing/adios2/engine/bp/TestBPWriteProfilingJSON.cpp index 1c2ef91ee3..959106b3b1 100644 --- a/testing/adios2/engine/bp/TestBPWriteProfilingJSON.cpp +++ b/testing/adios2/engine/bp/TestBPWriteProfilingJSON.cpp @@ -43,7 +43,6 @@ TEST_F(BPWriteProfilingJSONTest, DISABLED_ADIOS2BPWriteProfilingJSON) { // Use a relative path + file name to test path in file name capability std::string fname; - fname = "foo/ADIOS2BPWriteProfilingJSON.bp"; int mpiRank = 0, mpiSize = 1; // Number of rows @@ -55,6 +54,9 @@ TEST_F(BPWriteProfilingJSONTest, DISABLED_ADIOS2BPWriteProfilingJSON) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + fname = "foo/ADIOS2BPWriteProfilingJSON_MPI.bp"; +#else + fname = "foo/ADIOS2BPWriteProfilingJSON.bp"; #endif // Write test data and profiling.json using ADIOS2 diff --git 
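The patch spells both file names out per test rather than deriving the suffix centrally, which keeps every branch grep-able for the exact file a test produces. A hypothetical helper (the name `TestFileName` is invented here; it is not part of this patch or of the ADIOS2 API) shows how the same suffix could instead be computed in one place:

    #include <string>

    // Hypothetical helper: in MPI builds, insert "_MPI" before a trailing
    // ".bp" extension (or append it when there is no extension); in serial
    // builds, return the name unchanged.
    inline std::string TestFileName(std::string name)
    {
    #if ADIOS2_USE_MPI
        const std::string ext = ".bp";
        if (name.size() >= ext.size() &&
            name.compare(name.size() - ext.size(), ext.size(), ext) == 0)
        {
            name.insert(name.size() - ext.size(), "_MPI");
        }
        else
        {
            name += "_MPI";
        }
    #endif
        return name;
    }

    // Usage sketch: TestFileName("ADIOS2BPWriteRead1D8.bp") would yield
    // "ADIOS2BPWriteRead1D8_MPI.bp" under MPI and the original name otherwise.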
diff --git a/testing/adios2/engine/bp/TestBPWriteReadADIOS2.cpp b/testing/adios2/engine/bp/TestBPWriteReadADIOS2.cpp
index ef44278fdb..5bcb64af4b 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadADIOS2.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadADIOS2.cpp
@@ -35,7 +35,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteRead1D8.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -47,6 +46,9 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead1D8)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead1D8_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead1D8.bp");
 #endif
 
     // Write test data using BP
@@ -419,7 +421,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2BPWriteRead2D2x4Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -434,6 +435,9 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D2x4)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D2x4Test_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D2x4Test.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -739,7 +743,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -753,6 +756,9 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D4x2)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1045,7 +1051,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead10D2x2)
 {
     // Each process would write a 2x2x...x2 9D array and all processes would
     // form a 10D NumberOfProcess x 2 x ... x 2) array
-    const std::string fname("ADIOS2BPWriteRead10D2x2Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of steps
@@ -1054,6 +1059,9 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead10D2x2)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead10D2x2Test_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead10D2x2Test.bp");
 #endif
 
     size_t NX = static_cast<size_t>(mpiSize);
@@ -1227,7 +1235,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D4x2_ReadMultiSteps)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiSteps.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -1242,6 +1249,9 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D4x2_ReadMultiSteps)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiSteps_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiSteps.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1534,7 +1544,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D4x2_MultiStepsOverflow)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflow.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -1549,6 +1558,9 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteRead2D4x2_MultiStepsOverflow)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflow_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflow.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1790,7 +1802,6 @@ TEST_F(BPWriteReadTestADIOS2, ReadStartCount)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ReadStartCount.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
@@ -1799,6 +1810,9 @@ TEST_F(BPWriteReadTestADIOS2, ReadStartCount)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ReadStartCount_MPI.bp");
+#else
+    const std::string fname("ReadStartCount.bp");
 #endif
 
     std::vector<int32_t> localData(Nx);
@@ -2027,7 +2041,6 @@ TEST_F(BPWriteReadTestADIOS2, ADIOS2BPWriteReadEmptyProcess)
 
 TEST_F(BPWriteReadTestADIOS2, GetDeferredInClose)
 {
     // Test if Get() will retrieve data in Close()
-    const std::string fname("GetDeferredInClose.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
@@ -2036,6 +2049,9 @@ TEST_F(BPWriteReadTestADIOS2, GetDeferredInClose)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("GetDeferredInClose_MPI.bp");
+#else
+    const std::string fname("GetDeferredInClose.bp");
 #endif
 
     std::vector<int32_t> localData(Nx);
@@ -2095,7 +2111,6 @@ TEST_F(BPWriteReadTestADIOS2, GetDeferredInClose)
 
 TEST_F(BPWriteReadTestADIOS2, GetDeferredInEndStep)
 {
     // Test if Get() will retrieve data in EndStep()
-    const std::string fname("GetDeferredInEndStep.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
@@ -2104,6 +2119,9 @@ TEST_F(BPWriteReadTestADIOS2, GetDeferredInEndStep)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("GetDeferredInEndStep_MPI.bp");
+#else
+    const std::string fname("GetDeferredInEndStep.bp");
 #endif
 
     std::vector<int32_t> localData(Nx);
@@ -2164,7 +2182,6 @@ TEST_F(BPWriteReadTestADIOS2, GetDeferredInEndStep)
 
 TEST_F(BPWriteReadTestADIOS2, GetDeferredWithoutEndStep)
 {
     // Test if Get() will retrieve data in Close() when EndStep() is not called
-    const std::string fname("GetDeferredWithoutEndStep.bp");
 
     int mpiRank = 0, mpiSize = 1;
 
@@ -2173,6 +2190,9 @@ TEST_F(BPWriteReadTestADIOS2, GetDeferredWithoutEndStep)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("GetDeferredWithoutEndStep_MPI.bp");
+#else
+    const std::string fname("GetDeferredWithoutEndStep.bp");
 #endif
 
     std::vector<int32_t> localData(Nx);
diff --git a/testing/adios2/engine/bp/TestBPWriteReadADIOS2fstream.cpp b/testing/adios2/engine/bp/TestBPWriteReadADIOS2fstream.cpp
index 9eaab05c5a..db8116021e 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadADIOS2fstream.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadADIOS2fstream.cpp
@@ -33,7 +33,6 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteRead1D8fstream.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -45,6 +44,9 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead1D8)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead1D8fstream_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead1D8fstream.bp");
 #endif
 
     // Write test data using BP
@@ -370,7 +372,6 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2BPWriteRead2D2x4Testfstream.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -385,6 +386,9 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D2x4)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D2x4Testfstream_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D2x4Testfstream.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -711,7 +715,6 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Testfstream.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -725,6 +728,9 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D4x2)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Testfstream_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Testfstream.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1037,7 +1043,6 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D4x2_ReadMultiSteps)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiStepsfstream.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -1052,6 +1057,9 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D4x2_ReadMultiSteps)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiStepsfstream_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiStepsfstream.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1364,7 +1372,6 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D4x2_MultiStepsOverflow)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflowfstream.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -1379,6 +1386,9 @@ TEST_F(BPWriteReadTestADIOS2fstream, ADIOS2BPWriteRead2D4x2_MultiStepsOverflow)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflowfstream_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflowfstream.bp");
 #endif
 
     // Write test data using ADIOS2
diff --git a/testing/adios2/engine/bp/TestBPWriteReadADIOS2stdio.cpp b/testing/adios2/engine/bp/TestBPWriteReadADIOS2stdio.cpp
index af8b80c411..c4b259f12a 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadADIOS2stdio.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadADIOS2stdio.cpp
@@ -33,7 +33,6 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteRead1D8stdio.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -45,6 +44,9 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead1D8)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead1D8stdio_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead1D8stdio.bp");
 #endif
 
     // Write test data using BP
@@ -369,7 +371,6 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2BPWriteRead2D2x4Teststdio.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -384,6 +385,9 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D2x4)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D2x4Teststdio_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D2x4Teststdio.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -710,7 +714,6 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Teststdio.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -724,6 +727,9 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D4x2)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Teststdio_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Teststdio.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1036,7 +1042,6 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D4x2_ReadMultiSteps)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiStepsstdio.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -1051,6 +1056,9 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D4x2_ReadMultiSteps)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiStepsstdio_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_ReadMultiStepsstdio.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -1363,7 +1371,6 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D4x2_MultiStepsOverflow)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflowstdio.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -1378,6 +1385,9 @@ TEST_F(BPWriteReadTestADIOS2stdio, ADIOS2BPWriteRead2D4x2_MultiStepsOverflow)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflowstdio_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteRead2D4x2Test_Overflowstdio.bp");
 #endif
 
     // Write test data using ADIOS2
diff --git a/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2.cpp b/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2.cpp
index 2089eff466..e6e4c03347 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2.cpp
@@ -28,7 +28,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteReadAsStream1D8.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -38,8 +37,11 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead1D8)
     const size_t NSteps = 5;
 
 #if ADIOS2_USE_MPI
+    const std::string fname("ADIOS2BPWriteReadAsStream1D8_MPI.bp");
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+#else
+    const std::string fname("ADIOS2BPWriteReadAsStream1D8.bp");
 #endif
 
     // Write test data using BP
@@ -383,7 +385,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2BPWriteReadAsStream2D2x4Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -396,8 +397,11 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead2D2x4)
     const std::size_t NSteps = 3;
 
 #if ADIOS2_USE_MPI
+    const std::string fname("ADIOS2BPWriteReadAsStream2D2x4Test_MPI.bp");
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+#else
+    const std::string fname("ADIOS2BPWriteReadAsStream2D2x4Test.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -624,7 +628,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteReadAsStream2D4x2Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -636,8 +639,11 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead2D4x2)
     const std::size_t NSteps = 3;
 
 #if ADIOS2_USE_MPI
+    const std::string fname("ADIOS2BPWriteReadAsStream2D4x2Test_MPI.bp");
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+#else
+    const std::string fname("ADIOS2BPWriteReadAsStream2D4x2Test.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -869,8 +875,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ADIOS2BPWriteRead2D4x2)
 
 TEST_F(BPWriteReadAsStreamTestADIOS2, ReaderWriterDefineVariable)
 {
-    const std::string fnameFloat("BPReaderWriterDefineVariable_float.bp");
-    const std::string fname("BPReaderWriterDefineVariable_all.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -882,8 +886,13 @@ TEST_F(BPWriteReadAsStreamTestADIOS2, ReaderWriterDefineVariable)
     const std::size_t NSteps = 3;
 
 #if ADIOS2_USE_MPI
+    const std::string fnameFloat("BPReaderWriterDefineVariable_float_MPI.bp");
+    const std::string fname("BPReaderWriterDefineVariable_all_MPI.bp");
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+#else
+    const std::string fnameFloat("BPReaderWriterDefineVariable_float.bp");
+    const std::string fname("BPReaderWriterDefineVariable_all.bp");
 #endif
 
     // Write test data using ADIOS2
diff --git a/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2_Threads.cpp b/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2_Threads.cpp
index 87aea696d8..50744630fc 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2_Threads.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadAsStreamADIOS2_Threads.cpp
@@ -32,7 +32,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2_Threads, ADIOS2BPWriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteReadAsStream_Threads1D8.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -44,6 +43,9 @@ TEST_F(BPWriteReadAsStreamTestADIOS2_Threads, ADIOS2BPWriteRead1D8)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteReadAsStream_Threads1D8_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteReadAsStream_Threads1D8.bp");
 #endif
 
     // Write test data using BP
@@ -266,7 +268,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2_Threads, ADIOS2BPWriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2BPWriteReadAsStream_Threads2D2x4Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -281,6 +282,9 @@ TEST_F(BPWriteReadAsStreamTestADIOS2_Threads, ADIOS2BPWriteRead2D2x4)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteReadAsStream_Threads2D2x4Test_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteReadAsStream_Threads2D2x4Test.bp");
 #endif
 
     // Write test data using ADIOS2
@@ -513,7 +517,6 @@ TEST_F(BPWriteReadAsStreamTestADIOS2_Threads, ADIOS2BPWriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2BPWriteReadAsStream_Threads2D4x2Test.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -527,6 +530,9 @@ TEST_F(BPWriteReadAsStreamTestADIOS2_Threads, ADIOS2BPWriteRead2D4x2)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteReadAsStream_Threads2D4x2Test_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteReadAsStream_Threads2D4x2Test.bp");
 #endif
 
     // Write test data using ADIOS2
diff --git a/testing/adios2/engine/bp/TestBPWriteReadAttributes.cpp b/testing/adios2/engine/bp/TestBPWriteReadAttributes.cpp
index 5195fdb902..70da65f48c 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadAttributes.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadAttributes.cpp
@@ -28,8 +28,6 @@ class BPWriteReadAttributes : public ::testing::Test
 
 // ADIOS2 write, read for single value attributes
 TEST_F(BPWriteReadAttributes, WriteReadSingleTypes)
 {
-    const std::string fName =
-        "foo" + std::string(&adios2::PathSeparator, 1) + "WriteAttributeReadSingleTypes.bp";
 
     const std::string zero = std::to_string(0);
     const std::string s1_Single = std::string("s1_Single_") + zero;
@@ -56,7 +54,11 @@ TEST_F(BPWriteReadAttributes, WriteReadSingleTypes)
     // Write test data using BP
 #if ADIOS2_USE_MPI
     adios2::ADIOS adios(MPI_COMM_WORLD);
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "WriteAttributeReadSingleTypes_MPI.bp";
 #else
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "WriteAttributeReadSingleTypes.bp";
     adios2::ADIOS adios;
 #endif
     {
@@ -227,13 +229,16 @@ TEST_F(BPWriteReadAttributes, WriteReadSingleTypes)
 
 // ADIOS2 write read for array attributes
 TEST_F(BPWriteReadAttributes, WriteReadArrayTypes)
 {
-    const std::string fName =
-        "foo" + std::string(&adios2::PathSeparator, 1) + "WriteAttributeReadArrayTypes.bp";
 
 #if ADIOS2_USE_MPI
     int mpiRank = 0, mpiSize = 1;
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "WriteAttributeReadArrayTypes_MPI.bp";
+#else
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "WriteAttributeReadArrayTypes.bp";
 #endif
 
     const std::string zero = std::to_string(0);
@@ -443,8 +448,6 @@ TEST_F(BPWriteReadAttributes, WriteReadArrayTypes)
 
 TEST_F(BPWriteReadAttributes, BPWriteReadSingleTypesVar)
 {
-    const std::string fName =
-        "foo" + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadSingleTypesVar.bp";
 
     const std::string zero = std::to_string(0);
     const std::string s1_Single = std::string("s1_Single_") + zero;
@@ -472,7 +475,11 @@ TEST_F(BPWriteReadAttributes, BPWriteReadSingleTypesVar)
     // Write test data using BP
 #if ADIOS2_USE_MPI
     adios2::ADIOS adios(MPI_COMM_WORLD);
+    const std::string fName = "foo" + std::string(&adios2::PathSeparator, 1) +
+                              "BPWriteAttributeReadSingleTypesVar_MPI.bp";
 #else
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadSingleTypesVar.bp";
     adios2::ADIOS adios;
 #endif
     {
@@ -630,13 +637,16 @@ TEST_F(BPWriteReadAttributes, BPWriteReadSingleTypesVar)
 
 // ADIOS2 write read for array attributes
 TEST_F(BPWriteReadAttributes, WriteReadArrayTypesVar)
 {
-    const std::string fName =
-        "foo" + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadArrayTypesVar.bp";
 
 #if ADIOS2_USE_MPI
     int mpiRank = 0, mpiSize = 1;
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fName = "foo" + std::string(&adios2::PathSeparator, 1) +
+                              "BPWriteAttributeReadArrayTypesVar_MPI.bp";
+#else
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadArrayTypesVar.bp";
 #endif
 
     const std::string zero = std::to_string(0);
@@ -849,8 +859,6 @@ TEST_F(BPWriteReadAttributes, WriteReadArrayTypesVar)
 
 TEST_F(BPWriteReadAttributes, WriteReadStreamVarp)
 {
-    const std::string fName =
-        "foo" + std::string(&adios2::PathSeparator, 1) + "AttributesWriteReadVar.bp";
 
     const std::string separator = "\\";
 
@@ -864,6 +872,11 @@ TEST_F(BPWriteReadAttributes, WriteReadStreamVarp)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "AttributesWriteReadVar_MPI.bp";
+#else
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "AttributesWriteReadVar.bp";
 #endif
 
     SmallTestData currentTestData = generateNewSmallTestData(m_TestData, 0, 0, 0);
@@ -1003,8 +1016,6 @@ TEST_F(BPWriteReadAttributes, WriteReadStreamVarp)
 
 TEST_F(BPWriteReadAttributes, WriteReadStreamModifiable)
 {
-    const std::string fName =
-        "foo" + std::string(&adios2::PathSeparator, 1) + "AttributesWriteReadModifiable.bp";
 
     const std::string separator = "\\";
 
@@ -1018,6 +1029,11 @@ TEST_F(BPWriteReadAttributes, WriteReadStreamModifiable)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "AttributesWriteReadModifiable_MPI.bp";
+#else
+    const std::string fName =
+        "foo" + std::string(&adios2::PathSeparator, 1) + "AttributesWriteReadModifiable.bp";
 #endif
 
     const double d3[3] = {-1.1, -1.2, -1.3};
diff --git a/testing/adios2/engine/bp/TestBPWriteReadCuda.cpp b/testing/adios2/engine/bp/TestBPWriteReadCuda.cpp
index a3f1797e34..880c55de35 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadCuda.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadCuda.cpp
@@ -292,7 +292,6 @@ void CUDAWriteReadMemorySelection()
 
 void CUDAWriteReadMPI1D(const std::string mode)
 {
-    const std::string fname("BPWRCU1D_" + mode + ".bp");
     adios2::Mode ioMode = adios2::Mode::Deferred;
     if (mode == "Sync")
         ioMode = adios2::Mode::Sync;
@@ -306,6 +305,9 @@ void CUDAWriteReadMPI1D(const std::string mode)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWRCU1D_" + mode + "_MPI.bp");
+#else
+    const std::string fname("BPWRCU1D_" + mode + ".bp");
 #endif
 
 #if ADIOS2_USE_MPI
diff --git a/testing/adios2/engine/bp/TestBPWriteReadLocalVariables.cpp b/testing/adios2/engine/bp/TestBPWriteReadLocalVariables.cpp
index 9fd6c2d71a..05c23ccbfc 100644
--- a/testing/adios2/engine/bp/TestBPWriteReadLocalVariables.cpp
+++ b/testing/adios2/engine/bp/TestBPWriteReadLocalVariables.cpp
@@ -29,7 +29,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1D)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteReadLocal1D.bp");
 
     int mpiRank = 0, mpiSize = 1;
     // Number of rows
@@ -41,6 +40,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1D)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2BPWriteReadLocal1D_MPI.bp");
+#else
+    const std::string fname("ADIOS2BPWriteReadLocal1D.bp");
 #endif
 
     // Write test data using BP
@@ -418,7 +420,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D2x4)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2BPWriteReadLocal2D2x4.bp");
 
     int mpiRank = 0, mpiSize = 1;
     const size_t Nx = 4;
@@ -430,6 +431,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D2x4)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
std::string fname("ADIOS2BPWriteReadLocal2D2x4_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal2D2x4.bp"); #endif // Write test data using BP @@ -793,7 +797,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D4x2) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal2D4x2.bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 2; @@ -805,6 +808,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D4x2) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal2D4x2_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal2D4x2.bp"); #endif // Write test data using BP @@ -1169,7 +1175,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DAllSteps) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal1DAllSteps.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1181,6 +1186,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DAllSteps) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal1DAllSteps_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal1DAllSteps.bp"); #endif // Write test data using BP @@ -1443,7 +1451,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DBlockInfo) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal1DBlockInfo.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1455,6 +1462,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DBlockInfo) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal1DBlockInfo_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal1DBlockInfo.bp"); #endif // Write test data using BP @@ -1721,13 +1731,14 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DSubFile) TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2DChangeCount) { - const std::string fname("BPWRLocal2DChangeCount_" + engineName + ".bp"); - int mpiRank = 0; int mpiSize = 1; #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRLocal2DChangeCount_" + engineName + "_MPI.bp"); +#else + const std::string fname("BPWRLocal2DChangeCount_" + engineName + ".bp"); #endif const size_t Nx0 = static_cast(std::pow(2 - mpiRank, 2)) + 1; diff --git a/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSel.cpp b/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSel.cpp index 558e55014a..399b1b6fd2 100644 --- a/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSel.cpp +++ b/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSel.cpp @@ -28,7 +28,6 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal1DSel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadLocal1DSel.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -40,6 +39,9 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal1DSel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string 
fname("BPWriteReadLocal1DSel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal1DSel.bp"); #endif // Write test data using BP @@ -440,7 +442,6 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal2D2x4Sel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadLocal2D2x4Sel.bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 4; @@ -452,6 +453,9 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal2D2x4Sel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadLocal2D2x4Sel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal2D2x4Sel.bp"); #endif // Write test data using BP @@ -899,7 +903,6 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal2D4x2Sel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadLocal2D4x2Sel.bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 2; @@ -911,6 +914,9 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal2D4x2Sel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadLocal2D4x2Sel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal2D4x2Sel.bp"); #endif // Write test data using BP @@ -1357,7 +1363,6 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal1DAllStepsSel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadLocal1DAllStepsSel.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1369,6 +1374,9 @@ TEST_F(BPWriteReadLocalVariablesSel, BPWriteReadLocal1DAllStepsSel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadLocal1DAllStepsSel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal1DAllStepsSel.bp"); #endif // Write test data using BP diff --git a/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSelHighLevel.cpp b/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSelHighLevel.cpp index eaa9d3fdfb..525a955ccc 100644 --- a/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSelHighLevel.cpp +++ b/testing/adios2/engine/bp/TestBPWriteReadLocalVariablesSelHighLevel.cpp @@ -28,7 +28,6 @@ TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal1DSel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadLocal1DSelHighLevel.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -40,6 +39,9 @@ TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal1DSel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadLocal1DSelHighLevel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal1DSelHighLevel.bp"); #endif // Write test data using BP @@ -168,7 +170,6 @@ TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal1DSel) TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal2D2x4Sel) { - const std::string fname("BPWriteReadLocal2D2x4SelHighLevel.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -181,6 +182,9 @@ TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal2D2x4Sel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const 
std::string fname("BPWriteReadLocal2D2x4SelHighLevel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal2D2x4SelHighLevel.bp"); #endif // Write test data using BP @@ -332,7 +336,6 @@ TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal1DAllStepsSel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadLocal1DAllStepsSelHighLevel.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -344,6 +347,9 @@ TEST_F(BPWriteReadLocalVariablesSelHighLevel, BPWriteReadLocal1DAllStepsSel) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadLocal1DAllStepsSelHighLevel_MPI.bp"); +#else + const std::string fname("BPWriteReadLocal1DAllStepsSelHighLevel.bp"); #endif // Write test data using BP diff --git a/testing/adios2/engine/bp/TestBPWriteReadMultiblock.cpp b/testing/adios2/engine/bp/TestBPWriteReadMultiblock.cpp index 5014ba6335..0edfffc738 100644 --- a/testing/adios2/engine/bp/TestBPWriteReadMultiblock.cpp +++ b/testing/adios2/engine/bp/TestBPWriteReadMultiblock.cpp @@ -33,7 +33,6 @@ TEST_F(BPWriteReadMultiblockTest, ADIOS2BPWriteReadMultiblock1D8) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadMultiblock1D8.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -45,6 +44,9 @@ TEST_F(BPWriteReadMultiblockTest, ADIOS2BPWriteReadMultiblock1D8) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadMultiblock1D8_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadMultiblock1D8.bp"); #endif // Write test data using BP @@ -773,7 +775,6 @@ TEST_F(BPWriteReadMultiblockTest, ADIOS2BPWriteReadMultiblock2D2x4) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here - const std::string fname("ADIOS2BPWriteReadMultiblock2D2x4Test.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -788,6 +789,9 @@ TEST_F(BPWriteReadMultiblockTest, ADIOS2BPWriteReadMultiblock2D2x4) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadMultiblock2D2x4Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadMultiblock2D2x4Test.bp"); #endif // Write test data using ADIOS2 @@ -1182,7 +1186,6 @@ TEST_F(BPWriteReadMultiblockTest, ADIOS2BPWriteReadMultiblock2D4x2) { // Each process would write a 4x2 array and all processes would // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here - const std::string fname("ADIOS2BPWriteReadMultiblock2D4x2Test.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1196,6 +1199,9 @@ TEST_F(BPWriteReadMultiblockTest, ADIOS2BPWriteReadMultiblock2D4x2) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadMultiblock2D4x2Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadMultiblock2D4x2Test.bp"); #endif // Write test data using ADIOS2 @@ -2103,7 +2109,6 @@ TEST_F(BPWriteReadMultiblockTest, MultiblockNullBlocks) { // Each process would write a 2x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("MultiblockNullBlocks.bp"); int mpiRank = 0, mpiSize = 1; // Number of elements per blocks (blocksize) @@ 
-2116,6 +2121,9 @@ TEST_F(BPWriteReadMultiblockTest, MultiblockNullBlocks) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("MultiblockNullBlocks_MPI.bp"); +#else + const std::string fname("MultiblockNullBlocks.bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/TestBPWriteReadVariableSpan.cpp b/testing/adios2/engine/bp/TestBPWriteReadVariableSpan.cpp index 57fe702338..4043a4c3d2 100644 --- a/testing/adios2/engine/bp/TestBPWriteReadVariableSpan.cpp +++ b/testing/adios2/engine/bp/TestBPWriteReadVariableSpan.cpp @@ -28,7 +28,6 @@ TEST_F(BPWriteReadSpan, BPWriteRead1D8) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadSpan1D8.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -40,6 +39,9 @@ TEST_F(BPWriteReadSpan, BPWriteRead1D8) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadSpan1D8_MPI.bp"); +#else + const std::string fname("BPWriteReadSpan1D8.bp"); #endif #if ADIOS2_USE_MPI @@ -317,7 +319,6 @@ TEST_F(BPWriteReadSpan, BPWriteRead2D2x4) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here - const std::string fname("BPWriteReadSpan2D2x4.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -332,6 +333,9 @@ TEST_F(BPWriteReadSpan, BPWriteRead2D2x4) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadSpan2D2x4_MPI.bp"); +#else + const std::string fname("BPWriteReadSpan2D2x4.bp"); #endif // Write test data using ADIOS2 @@ -628,7 +632,6 @@ TEST_F(BPWriteReadSpan, BPWriteRead2D2x4) TEST_F(BPWriteReadSpan, BPWriteRead1D8Local) { - const std::string fname("BPWriteReadSpan1D8Local.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -640,6 +643,9 @@ TEST_F(BPWriteReadSpan, BPWriteRead1D8Local) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadSpan1D8Local_MPI.bp"); +#else + const std::string fname("BPWriteReadSpan1D8Local.bp"); #endif // Write test data using BP @@ -877,7 +883,6 @@ TEST_F(BPWriteReadSpan, BPWriteRead2D2x4Local) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here - const std::string fname("BPWriteReadSpan2D2x4Local.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -892,6 +897,9 @@ TEST_F(BPWriteReadSpan, BPWriteRead2D2x4Local) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadSpan2D2x4Local_MPI.bp"); +#else + const std::string fname("BPWriteReadSpan2D2x4Local.bp"); #endif // Write test data using ADIOS2 @@ -1156,7 +1164,6 @@ TEST_F(BPWriteReadSpan, BPWriteRead1D8FillValue) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWriteReadSpan1D8FillValue.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1168,6 +1175,9 @@ TEST_F(BPWriteReadSpan, BPWriteRead1D8FillValue) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteReadSpan1D8FillValue_MPI.bp"); +#else + const std::string fname("BPWriteReadSpan1D8FillValue.bp"); #endif 
// Write test data using BP @@ -1469,7 +1479,6 @@ TEST_F(BPWriteReadSpan, BPWriteRead1D8FillValue) #ifdef ADIOS2_HAVE_BZIP2 TEST_F(BPWriteReadSpan, BPWriteSpanOperatorException) { - const std::string fname("BPWriteSpanOperatorException.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1481,6 +1490,9 @@ TEST_F(BPWriteReadSpan, BPWriteSpanOperatorException) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWriteSpanOperatorException_MPI.bp"); +#else + const std::string fname("BPWriteSpanOperatorException.bp"); #endif // Write test data using BP diff --git a/testing/adios2/engine/bp/TestBPWriteReadVector.cpp b/testing/adios2/engine/bp/TestBPWriteReadVector.cpp index 595a6c5578..9baeb7369e 100644 --- a/testing/adios2/engine/bp/TestBPWriteReadVector.cpp +++ b/testing/adios2/engine/bp/TestBPWriteReadVector.cpp @@ -32,7 +32,6 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteRead1D8) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadVector1D8.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -44,6 +43,9 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteRead1D8) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadVector1D8_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadVector1D8.bp"); #endif // Write test data using BP @@ -342,7 +344,6 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteRead2D2x4) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here - const std::string fname("ADIOS2BPWriteReadVector2D2x4Test.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -357,6 +358,9 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteRead2D2x4) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadVector2D2x4Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadVector2D2x4Test.bp"); #endif // Write test data using ADIOS2 @@ -655,7 +659,6 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteRead2D4x2) { // Each process would write a 4x2 array and all processes would // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here - const std::string fname("ADIOS2BPWriteReadVector2D4x2Test.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -669,6 +672,9 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteRead2D4x2) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadVector2D4x2Test_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadVector2D4x2Test.bp"); #endif // Write test data using ADIOS2 @@ -952,7 +958,6 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteReadVector2D4x2_MultiSteps) { // Each process would write a 4x2 array and all processes would // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here - const std::string fname("ADIOS2BPWriteReadVector2D4x2_MultiSteps.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -967,6 +972,9 @@ TEST_F(BPWriteReadVector, ADIOS2BPWriteReadVector2D4x2_MultiSteps) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadVector2D4x2_MultiSteps_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadVector2D4x2_MultiSteps.bp"); #endif // Write test 
data using ADIOS2 diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadBZIP2.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadBZIP2.cpp index dcbafcccf0..651531ff22 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadBZIP2.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadBZIP2.cpp @@ -19,7 +19,6 @@ void BZIP2Accuracy1D(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWR_BZIP2_1D_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -38,6 +37,9 @@ void BZIP2Accuracy1D(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWR_BZIP2_1D_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWR_BZIP2_1D_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -152,7 +154,6 @@ void BZIP2Accuracy1DLocal(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // write a Nx 1D array - const std::string fname("BPWR_BZIP2_1D_Local_" + accuracy + ".bp"); int mpiRank = 0; // Number of rows @@ -170,6 +171,9 @@ void BZIP2Accuracy1DLocal(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); + const std::string fname("BPWR_BZIP2_1D_Local_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWR_BZIP2_1D_Local_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -280,7 +284,6 @@ void BZIP2Accuracy2D(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBZIP22D_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -300,6 +303,9 @@ void BZIP2Accuracy2D(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBZIP22D_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRBZIP22D_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -414,7 +420,6 @@ void BZIP2Accuracy3D(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBZIP23D_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -435,6 +440,9 @@ void BZIP2Accuracy3D(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBZIP23D_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRBZIP23D_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -551,7 +559,6 @@ void BZIP2Accuracy1DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBZIP21DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -570,6 +577,9 @@ void BZIP2Accuracy1DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBZIP21DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRBZIP21DSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -682,7 +692,6 @@ void BZIP2Accuracy2DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string 
fname("BPWRBZIP22DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -702,6 +711,9 @@ void BZIP2Accuracy2DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBZIP22DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRBZIP22DSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -816,7 +828,6 @@ void BZIP2Accuracy3DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBZIP23DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -837,6 +848,9 @@ void BZIP2Accuracy3DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBZIP23DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRBZIP23DSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc.cpp index 4f711437d8..09908075cb 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc.cpp @@ -21,8 +21,6 @@ void BloscAccuracy1D(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWR_Blosc_1D_" + accuracy + "_" + threshold + "_" + doshuffle + - ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -41,6 +39,11 @@ void BloscAccuracy1D(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWR_Blosc_1D_" + accuracy + "_" + threshold + "_" + doshuffle + + "_MPI.bp"); +#else + const std::string fname("BPWR_Blosc_1D_" + accuracy + "_" + threshold + "_" + doshuffle + + ".bp"); #endif #if ADIOS2_USE_MPI @@ -160,8 +163,6 @@ void BloscAccuracy2D(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc2D_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -181,6 +182,11 @@ void BloscAccuracy2D(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc2D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc2D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -300,8 +306,6 @@ void BloscAccuracy3D(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc3D_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -322,6 +326,11 @@ void BloscAccuracy3D(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string 
fname("BPWRBlosc3D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc3D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -443,8 +452,6 @@ void BloscAccuracy1DSel(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc1DSel_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -463,6 +470,11 @@ void BloscAccuracy1DSel(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc1DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc1DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -580,8 +592,6 @@ void BloscAccuracy2DSel(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc2DSel_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -601,6 +611,11 @@ void BloscAccuracy2DSel(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc2DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc2DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -720,8 +735,6 @@ void BloscAccuracy3DSel(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc3DSel_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -742,6 +755,11 @@ void BloscAccuracy3DSel(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc3DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc3DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc2.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc2.cpp index 9e72a351b5..e6852d3471 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc2.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadBlosc2.cpp @@ -21,8 +21,6 @@ void Blosc2Accuracy1D(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWR_Blosc2_1D_" + accuracy + "_" + threshold + "_" + doshuffle + - ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -41,6 +39,11 @@ void Blosc2Accuracy1D(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); 
MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWR_Blosc2_1D_" + accuracy + "_" + threshold + "_" + doshuffle + + "_MPI.bp"); +#else + const std::string fname("BPWR_Blosc2_1D_" + accuracy + "_" + threshold + "_" + doshuffle + + ".bp"); #endif #if ADIOS2_USE_MPI @@ -160,8 +163,6 @@ void Blosc2Accuracy2D(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc22D_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -181,6 +182,11 @@ void Blosc2Accuracy2D(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc22D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc22D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -300,8 +306,6 @@ void Blosc2Accuracy3D(const std::string accuracy, const std::string threshold, { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc23D_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -322,6 +326,11 @@ void Blosc2Accuracy3D(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc23D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc23D_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -443,8 +452,6 @@ void Blosc2Accuracy1DSel(const std::string accuracy, const std::string threshold { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc21DSel_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -463,6 +470,11 @@ void Blosc2Accuracy1DSel(const std::string accuracy, const std::string threshold #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc21DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc21DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -580,8 +592,6 @@ void Blosc2Accuracy2DSel(const std::string accuracy, const std::string threshold { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc22DSel_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -601,6 +611,11 @@ void Blosc2Accuracy2DSel(const std::string accuracy, const std::string threshold #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc22DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc22DSel_" + accuracy + "_" + threshold + threshold + "_" + + 
doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -720,8 +735,6 @@ void Blosc2Accuracy3DSel(const std::string accuracy, const std::string threshold { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc23DSel_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -742,6 +755,11 @@ void Blosc2Accuracy3DSel(const std::string accuracy, const std::string threshold #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc23DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc23DSel_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI @@ -867,8 +885,6 @@ void Blosc2NullBlocks(const std::string accuracy, const std::string threshold, // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRBlosc2NullBlock_" + accuracy + "_" + threshold + threshold + "_" + - doshuffle + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -883,6 +899,11 @@ void Blosc2NullBlocks(const std::string accuracy, const std::string threshold, #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRBlosc2NullBlock_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + "_MPI.bp"); +#else + const std::string fname("BPWRBlosc2NullBlock_" + accuracy + "_" + threshold + threshold + "_" + + doshuffle + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadLocalVariables.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadLocalVariables.cpp index ec36175659..a725ddf620 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadLocalVariables.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadLocalVariables.cpp @@ -26,7 +26,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1D) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal1D.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -38,6 +37,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1D) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal1D_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal1D.bp"); #endif // Write test data using BP @@ -375,7 +377,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D2x4) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal2D2x4.bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 4; @@ -387,6 +388,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D2x4) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal2D2x4_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal2D2x4.bp"); #endif // Write test data using BP @@ -730,7 +734,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D4x2) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string 
fname("ADIOS2BPWriteReadLocal2D4x2.bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 2; @@ -742,6 +745,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal2D4x2) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal2D4x2_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal2D4x2.bp"); #endif // Write test data using BP @@ -1086,7 +1092,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DAllSteps) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal1DAllSteps.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1098,6 +1103,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DAllSteps) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal1DAllSteps_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal1DAllSteps.bp"); #endif // Write test data using BP @@ -1324,7 +1332,6 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DBlockInfo) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("ADIOS2BPWriteReadLocal1DBlockInfo.bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -1336,6 +1343,9 @@ TEST_F(BPWriteReadLocalVariables, ADIOS2BPWriteReadLocal1DBlockInfo) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("ADIOS2BPWriteReadLocal1DBlockInfo_MPI.bp"); +#else + const std::string fname("ADIOS2BPWriteReadLocal1DBlockInfo.bp"); #endif // Write test data using BP diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARD.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARD.cpp index c5f98e4a90..282347cc4d 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARD.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARD.cpp @@ -20,7 +20,6 @@ void MGARDAccuracy1D(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD1D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -39,6 +38,9 @@ void MGARDAccuracy1D(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD1D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD1D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -175,7 +177,6 @@ void MGARDAccuracy2D(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD2D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -195,6 +196,9 @@ void MGARDAccuracy2D(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD2D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD2D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -336,7 +340,6 @@ void MGARDAccuracy3D(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD3D_" + tolerance + ".bp"); int 
mpiRank = 0, mpiSize = 1; // Number of rows @@ -357,6 +360,9 @@ void MGARDAccuracy3D(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD3D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD3D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -498,7 +504,6 @@ void MGARDAccuracy1DSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD1DSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -517,6 +522,9 @@ void MGARDAccuracy1DSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD1DSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD1DSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -627,7 +635,6 @@ void MGARDAccuracy2DSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD2DSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -647,6 +654,9 @@ void MGARDAccuracy2DSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD2DSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD2DSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -759,7 +769,6 @@ void MGARDAccuracy3DSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD3DSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -780,6 +789,9 @@ void MGARDAccuracy3DSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD3DSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD3DSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -899,7 +911,6 @@ void MGARDNullBlocks(const std::string tolerance) // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARDNull_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -915,6 +926,9 @@ void MGARDNullBlocks(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARDNull_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARDNull_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDCuda.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDCuda.cpp index a93adfcbbe..24b4c2dffe 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDCuda.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDCuda.cpp @@ -20,7 +20,6 @@ std::string engineName; // comes from command line void MGARDAccuracy2D(const std::string tolerance) { - const std::string fname("BPWRMGARDCU2D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 100; @@ -33,6 +32,9 @@ void MGARDAccuracy2D(const std::string tolerance) #if ADIOS2_USE_MPI 
MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARDCU2D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARDCU2D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -140,7 +142,6 @@ void MGARDAccuracySmall(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD1D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -155,6 +156,9 @@ void MGARDAccuracySmall(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD1D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD1D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDMDR.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDMDR.cpp index 084e21631b..253c71da24 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDMDR.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDMDR.cpp @@ -28,7 +28,6 @@ TEST_F(BPWriteReadMGARDMDR, BPWRMGARD1D) { // Refactor a dataset with MDR, then // read back with various accuracies - const std::string fname("BPWRMGARDMDR1D.bp"); int mpiRank = 0, mpiSize = 1; const size_t Nx = 30000; // 100k minimum data size for MDR @@ -49,6 +48,9 @@ TEST_F(BPWriteReadMGARDMDR, BPWRMGARD1D) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARDMDR1D_MPI.bp"); +#else + const std::string fname("BPWRMGARDMDR1D.bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDPlus.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDPlus.cpp index 3897361960..a1f95eaa08 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDPlus.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadMGARDPlus.cpp @@ -20,7 +20,6 @@ void MGARDAccuracy1D(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD1D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -39,6 +38,9 @@ void MGARDAccuracy1D(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD1D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD1D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -151,7 +153,6 @@ void MGARDAccuracy2D(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD2D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -171,6 +172,9 @@ void MGARDAccuracy2D(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD2D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD2D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -284,7 +288,6 @@ void MGARDAccuracy3D(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD3D_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 
1; // Number of rows @@ -305,6 +308,9 @@ void MGARDAccuracy3D(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD3D_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD3D_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -420,7 +426,6 @@ void MGARDAccuracy1DSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD1DSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -439,6 +444,9 @@ void MGARDAccuracy1DSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD1DSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD1DSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -541,7 +549,6 @@ void MGARDAccuracy2DSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD2DSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -561,6 +568,9 @@ void MGARDAccuracy2DSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD2DSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD2DSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -666,7 +676,6 @@ void MGARDAccuracy3DSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD3DSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -687,6 +696,9 @@ void MGARDAccuracy3DSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD3DSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD3DSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI @@ -804,7 +816,6 @@ void MGARDAccuracy2DSmallSel(const std::string tolerance) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRMGARD2DSmallSel_" + tolerance + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -823,6 +834,9 @@ void MGARDAccuracy2DSmallSel(const std::string tolerance) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRMGARD2DSmallSel_" + tolerance + "_MPI.bp"); +#else + const std::string fname("BPWRMGARD2DSmallSel_" + tolerance + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadPNG.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadPNG.cpp index 84924f9de6..9437764eb4 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadPNG.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadPNG.cpp @@ -26,7 +26,6 @@ void PNGAccuracy2D(const std::string compressionLevel) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRPNG2D_" + compressionLevel + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -59,6 +58,9 @@ void PNGAccuracy2D(const std::string compressionLevel) #if 
ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRPNG2D_" + compressionLevel + "_MPI.bp"); +#else + const std::string fname("BPWRPNG2D_" + compressionLevel + ".bp"); #endif #if ADIOS2_USE_MPI @@ -275,7 +277,6 @@ void PNGAccuracy2DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRPNG2DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -295,6 +296,9 @@ void PNGAccuracy2DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRPNG2DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRPNG2DSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadSZ.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadSZ.cpp index 3f95fa0d22..dc1045ca02 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadSZ.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadSZ.cpp @@ -19,7 +19,6 @@ void SZAccuracy1D(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ1D_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -38,6 +37,9 @@ void SZAccuracy1D(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ1D_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ1D_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -151,7 +153,6 @@ void SZAccuracy2D(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ2D_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -171,6 +172,9 @@ void SZAccuracy2D(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ2D_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ2D_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -284,7 +288,6 @@ void SZAccuracy3D(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ3D_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -305,6 +308,9 @@ void SZAccuracy3D(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ3D_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ3D_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -420,7 +426,6 @@ void SZAccuracy1DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ1DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -439,6 +444,9 @@ void SZAccuracy1DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ1DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ1DSel_" + accuracy + ".bp"); #endif #if 
ADIOS2_USE_MPI @@ -552,7 +560,6 @@ void SZAccuracy2DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ2DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -572,6 +579,9 @@ void SZAccuracy2DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ2DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ2DSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -689,7 +699,6 @@ void SZAccuracy3DSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ3DSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -710,6 +719,9 @@ void SZAccuracy3DSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ3DSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ3DSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI @@ -829,7 +841,6 @@ void SZAccuracy2DSmallSel(const std::string accuracy) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRSZ2DSmallSel_" + accuracy + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -848,6 +859,9 @@ void SZAccuracy2DSmallSel(const std::string accuracy) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRSZ2DSmallSel_" + accuracy + "_MPI.bp"); +#else + const std::string fname("BPWRSZ2DSmallSel_" + accuracy + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadSzComplex.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadSzComplex.cpp index b0c526b379..b46c33d612 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadSzComplex.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadSzComplex.cpp @@ -131,6 +131,7 @@ void Writer(const Dims &shape, const Dims &start, const Dims &count, const size_ size_t datasize = std::accumulate(count.begin(), count.end(), 1, std::multiplies()); #if ADIOS2_USE_MPI adios2::ADIOS adios(MPI_COMM_WORLD); + fileName = "TestBPWriteReadSzComplex_MPI"; #else adios2::ADIOS adios; #endif diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadZfp.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadZfp.cpp index 12e2208306..c1c4acb6d4 100644 --- a/testing/adios2/engine/bp/operations/TestBPWriteReadZfp.cpp +++ b/testing/adios2/engine/bp/operations/TestBPWriteReadZfp.cpp @@ -20,7 +20,6 @@ void ZFPRate1D(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRZFP1D_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -39,6 +38,9 @@ void ZFPRate1D(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP1D_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP1D_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI @@ -151,7 +153,6 @@ void ZFPRate2D(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string 
fname("BPWRZFP2D_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -171,6 +172,9 @@ void ZFPRate2D(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP2D_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP2D_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI @@ -281,7 +285,6 @@ void ZFPRate3D(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRZFP3D_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -302,6 +305,9 @@ void ZFPRate3D(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP3D_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP3D_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI @@ -414,7 +420,6 @@ void ZFPRate1DSel(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRZFP1DSel_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -433,6 +438,9 @@ void ZFPRate1DSel(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP1DSel_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP1DSel_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI @@ -540,7 +548,6 @@ void ZFPRate2DSel(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRZFP2DSel_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -560,6 +567,9 @@ void ZFPRate2DSel(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP2DSel_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP2DSel_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI @@ -664,7 +674,6 @@ void ZFPRate3DSel(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRZFP3DSel_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -685,6 +694,9 @@ void ZFPRate3DSel(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP3DSel_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP3DSel_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI @@ -790,7 +802,6 @@ void ZFPRate2DSmallSel(const std::string rate) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname("BPWRZFP2DSmallSel_" + rate + ".bp"); int mpiRank = 0, mpiSize = 1; // Number of rows @@ -809,6 +820,9 @@ void ZFPRate2DSmallSel(const std::string rate) #if ADIOS2_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); MPI_Comm_size(MPI_COMM_WORLD, &mpiSize); + const std::string fname("BPWRZFP2DSmallSel_" + rate + "_MPI.bp"); +#else + const std::string fname("BPWRZFP2DSmallSel_" + rate + ".bp"); #endif #if ADIOS2_USE_MPI diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadZfpConfig.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadZfpConfig.cpp index 7379963ea6..76ea948c4e 100644 --- 
--- a/testing/adios2/engine/bp/operations/TestBPWriteReadZfpConfig.cpp
+++ b/testing/adios2/engine/bp/operations/TestBPWriteReadZfpConfig.cpp
@@ -23,7 +23,6 @@ void ZfpRate1D(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig1D_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -42,6 +41,9 @@ void ZfpRate1D(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig1D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig1D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -137,7 +139,6 @@ void ZfpRate2D(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig2D_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -157,6 +158,9 @@ void ZfpRate2D(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig2D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig2D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -251,7 +255,6 @@ void ZfpRate3D(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig3D_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -272,6 +275,9 @@ void ZfpRate3D(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig3D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig3D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -368,7 +374,6 @@ void ZfpRate1DSel(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig1DSel_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -387,6 +392,9 @@ void ZfpRate1DSel(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig1DSel_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig1DSel_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -482,7 +490,6 @@ void ZfpRate2DSel(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig2DSel_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -502,6 +509,9 @@ void ZfpRate2DSel(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig2DSel_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig2DSel_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -596,7 +606,6 @@ void ZfpRate3DSel(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig3DSel_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -617,6 +626,9 @@ void ZfpRate3DSel(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig3DSel_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig3DSel_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -713,7 +725,6 @@ void ZfpRate2DSmallSel(const std::string configFile)
     const auto begin = configFile.find("_rate") + 5;
     const auto end = configFile.find_last_of(".") - begin;
     const std::string rate = configFile.substr(begin, end);
-    const std::string fname("BPWriteReadZfpConfig2DSmallSel_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -732,6 +743,9 @@ void ZfpRate2DSmallSel(const std::string configFile)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWriteReadZfpConfig2DSmallSel_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWriteReadZfpConfig2DSmallSel_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadZfpCuda.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadZfpCuda.cpp
index 299fa10f25..c0edb583b0 100644
--- a/testing/adios2/engine/bp/operations/TestBPWriteReadZfpCuda.cpp
+++ b/testing/adios2/engine/bp/operations/TestBPWriteReadZfpCuda.cpp
@@ -22,7 +22,6 @@ void ZFPRateCUDA(const std::string rate)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("BPWRZFP1D_" + rate + ".bp");

     // Number of rows
     const size_t Nx = 100;
@@ -34,6 +33,9 @@ void ZFPRateCUDA(const std::string rate)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWRZFP1D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWRZFP1D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
diff --git a/testing/adios2/engine/bp/operations/TestBPWriteReadZfpRemoveOperations.cpp b/testing/adios2/engine/bp/operations/TestBPWriteReadZfpRemoveOperations.cpp
index 7aaf7c5c8a..c02f03da21 100644
--- a/testing/adios2/engine/bp/operations/TestBPWriteReadZfpRemoveOperations.cpp
+++ b/testing/adios2/engine/bp/operations/TestBPWriteReadZfpRemoveOperations.cpp
@@ -23,7 +23,6 @@ void ZFPRate1D(const std::string rate)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("BPWRZFPOdd1D_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -42,6 +41,9 @@ void ZFPRate1D(const std::string rate)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWRZFPOdd1D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWRZFPOdd1D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -169,7 +171,6 @@ void ZFPRate2D(const std::string rate)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("BPWRZFPOdd2D_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -189,6 +190,9 @@ void ZFPRate2D(const std::string rate)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWRZFPOdd2D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWRZFPOdd2D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
@@ -318,7 +322,6 @@ void ZFPRate3D(const std::string rate)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("BPWRZFPOdd3D_" + rate + ".bp");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -339,6 +342,9 @@ void ZFPRate3D(const std::string rate)
 #if ADIOS2_USE_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("BPWRZFPOdd3D_" + rate + "_MPI.bp");
+#else
+    const std::string fname("BPWRZFPOdd3D_" + rate + ".bp");
 #endif

 #if ADIOS2_USE_MPI
diff --git a/testing/adios2/engine/hdf5/TestHDF5WriteReadAsStream.cpp b/testing/adios2/engine/hdf5/TestHDF5WriteReadAsStream.cpp
index 57b30e32b8..604ca9604f 100644
--- a/testing/adios2/engine/hdf5/TestHDF5WriteReadAsStream.cpp
+++ b/testing/adios2/engine/hdf5/TestHDF5WriteReadAsStream.cpp
@@ -26,7 +26,6 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead1D8)
 {
     // Each process would write a 1x8 array and all processes would
     // form a mpiSize * Nx 1D array
-    const std::string fname("ADIOS2HDF5WriteReadAsStream1D8.h5");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -38,6 +37,9 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead1D8)
 #ifdef TEST_HDF5_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2HDF5WriteReadAsStream1D8_MPI.h5");
+#else
+    const std::string fname("ADIOS2HDF5WriteReadAsStream1D8.h5");
 #endif

     // Write test data using HDF5
@@ -395,7 +397,6 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead2D2x4)
 {
     // Each process would write a 2x4 array and all processes would
     // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here
-    const std::string fname("ADIOS2HDF5WriteReadAsStream2D2x4Test.h5");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -410,6 +411,9 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead2D2x4)
 #ifdef TEST_HDF5_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2HDF5WriteReadAsStream2D2x4Test_MPI.h5");
+#else
+    const std::string fname("ADIOS2HDF5WriteReadAsStream2D2x4Test.h5");
 #endif

     // Write test data using ADIOS2
@@ -634,7 +638,6 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead2D4x2)
 {
     // Each process would write a 4x2 array and all processes would
     // form a 2D 4 * (NumberOfProcess * Nx) matrix where Nx is 2 here
-    const std::string fname("ADIOS2HDF5WriteReadAsStream2D4x2Test.h5");
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
@@ -648,6 +651,9 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead2D4x2)
 #ifdef TEST_HDF5_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fname("ADIOS2HDF5WriteReadAsStream2D4x2Test_MPI.h5");
+#else
+    const std::string fname("ADIOS2HDF5WriteReadAsStream2D4x2Test.h5");
 #endif

     // Write test data using ADIOS2
@@ -874,9 +880,6 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ADIOS2HDF5WriteRead2D4x2)

 TEST_F(HDF5WriteReadAsStreamTestADIOS2, ReaderWriterDefineVariable)
 {
-    const std::string fnameFloat("HDF5ReaderWriterDefineVariable_float.h5");
-    const std::string fname("HDF5ReaderWriterDefineVariable_all.h5");
-
     int mpiRank = 0, mpiSize = 1;

     // Number of rows
     const std::size_t Nx = 2;
@@ -889,6 +892,11 @@ TEST_F(HDF5WriteReadAsStreamTestADIOS2, ReaderWriterDefineVariable)
 #ifdef TEST_HDF5_MPI
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fnameFloat("HDF5ReaderWriterDefineVariable_float.h5");
+    const std::string fname("HDF5ReaderWriterDefineVariable_all.h5");
+#else
+    const std::string fnameFloat("HDF5ReaderWriterDefineVariable_float.h5");
+    const std::string fname("HDF5ReaderWriterDefineVariable_all.h5");
 #endif

     // Write test data using ADIOS2
diff --git a/testing/adios2/engine/hdf5/TestHDF5WriteReadAttributesADIOS2.cpp b/testing/adios2/engine/hdf5/TestHDF5WriteReadAttributesADIOS2.cpp
index ac746724d4..a49f3352f8 100644
--- a/testing/adios2/engine/hdf5/TestHDF5WriteReadAttributesADIOS2.cpp
+++ b/testing/adios2/engine/hdf5/TestHDF5WriteReadAttributesADIOS2.cpp
@@ -25,9 +25,6 @@ class BPWriteReadAttributeTestADIOS2 : public ::testing::Test
 // ADIOS2 write, read for single value attributes
 TEST_F(BPWriteReadAttributeTestADIOS2, ADIOS2BPWriteReadSingleTypes)
 {
-    const std::string fName =
-        "." + std::string(&adios2::PathSeparator, 1) + "ADIOS2BPWriteAttributeReadSingleTypes.h5";
-
     const std::string zero = std::to_string(0);
     const std::string s1_Single = std::string("s1_Single_") + zero;
     const std::string s1_Array = std::string("s1_Array_") + zero;
@@ -50,8 +47,14 @@ TEST_F(BPWriteReadAttributeTestADIOS2, ADIOS2BPWriteReadSingleTypes)
     // Write test data using BP
 #ifdef TEST_HDF5_MPI
     adios2::ADIOS adios(MPI_COMM_WORLD);
+    const std::string fName = "." + std::string(&adios2::PathSeparator, 1) +
+                              "ADIOS2BPWriteAttributeReadSingleTypes_MPI.h5";
+
 #else
     adios2::ADIOS adios;
+    const std::string fName =
+        "." + std::string(&adios2::PathSeparator, 1) + "ADIOS2BPWriteAttributeReadSingleTypes.h5";
+
 #endif
     {
         adios2::IO io = adios.DeclareIO("TestIO");
@@ -185,13 +188,16 @@ TEST_F(BPWriteReadAttributeTestADIOS2, ADIOS2BPWriteReadSingleTypes)
 // ADIOS2 write read for array attributes
 TEST_F(BPWriteReadAttributeTestADIOS2, ADIOS2BPWriteReadArrayTypes)
 {
-    const std::string fName =
-        "." + std::string(&adios2::PathSeparator, 1) + "ADIOS2BPWriteAttributeReadArrayTypes.h5";

 #ifdef TEST_HDF5_MPI
     int mpiRank = 0, mpiSize = 1;
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fName = "." + std::string(&adios2::PathSeparator, 1) +
+                              "ADIOS2BPWriteAttributeReadArrayTypes_MPI.h5";
+#else
+    const std::string fName =
+        "." + std::string(&adios2::PathSeparator, 1) + "ADIOS2BPWriteAttributeReadArrayTypes.h5";
 #endif

     const std::string zero = std::to_string(0);
@@ -361,9 +367,6 @@ TEST_F(BPWriteReadAttributeTestADIOS2, ADIOS2BPWriteReadArrayTypes)

 TEST_F(BPWriteReadAttributeTestADIOS2, BPWriteReadSingleTypesVar)
 {
-    const std::string fName =
-        "." + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadSingleTypesVar.h5";
-
     const std::string zero = std::to_string(0);
     const std::string s1_Single = std::string("s1_Single_") + zero;
     const std::string i8_Single = std::string("i8_Single_") + zero;
@@ -387,8 +390,14 @@ TEST_F(BPWriteReadAttributeTestADIOS2, BPWriteReadSingleTypesVar)
     // Write test data using BP
 #ifdef TEST_HDF5_MPI
     adios2::ADIOS adios(MPI_COMM_WORLD);
+    const std::string fName =
+        "." + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadSingleTypesVar_MPI.h5";
+
 #else
     adios2::ADIOS adios;
+    const std::string fName =
+        "." + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadSingleTypesVar.h5";
+
 #endif
     {
         adios2::IO io = adios.DeclareIO("TestIO");
@@ -511,13 +520,15 @@ TEST_F(BPWriteReadAttributeTestADIOS2, BPWriteReadSingleTypesVar)
 // ADIOS2 write read for array attributes
 TEST_F(BPWriteReadAttributeTestADIOS2, ADIOS2BPWriteReadArrayTypesVar)
 {
-    const std::string fName =
-        "." + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadArrayTypesVar.h5";
-
 #ifdef TEST_HDF5_MPI
     int mpiRank = 0, mpiSize = 1;
     MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
     MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+    const std::string fName =
+        "." + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadArrayTypesVar_MPI.h5";
+#else
+    const std::string fName =
+        "." + std::string(&adios2::PathSeparator, 1) + "BPWriteAttributeReadArrayTypesVar.h5";
 #endif

     const std::string zero = std::to_string(0);
diff --git a/testing/adios2/hierarchy/TestBPHierarchicalReading.cpp b/testing/adios2/hierarchy/TestBPHierarchicalReading.cpp
index c4039dbe19..1d39c84c39 100644
--- a/testing/adios2/hierarchy/TestBPHierarchicalReading.cpp
+++ b/testing/adios2/hierarchy/TestBPHierarchicalReading.cpp
@@ -23,17 +23,19 @@ class ADIOSHierarchicalReadVariableTest : public ::testing::Test

 TEST_F(ADIOSHierarchicalReadVariableTest, Read)
 {
-    std::string filename = "ADIOSHierarchicalReadVariable." + engineName + ".bp";
-
     // Number of steps
     const std::size_t NSteps = 2;

     long unsigned int rank, size;

 #if ADIOS2_USE_MPI
+    std::string filename = "ADIOSHierarchicalReadVariable." + engineName + "_MPI.bp";
+
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
     MPI_Comm_size(MPI_COMM_WORLD, &size);
 #else
+    std::string filename = "ADIOSHierarchicalReadVariable." + engineName + ".bp";
+
     rank = 0;
     size = 1;
 #endif
diff --git a/testing/adios2/performance/manyvars/TestManyVars.cpp b/testing/adios2/performance/manyvars/TestManyVars.cpp
index f17cedd5ec..923b6df054 100644
--- a/testing/adios2/performance/manyvars/TestManyVars.cpp
+++ b/testing/adios2/performance/manyvars/TestManyVars.cpp
@@ -235,8 +235,13 @@ class TestManyVars : public ::testing::TestWithParam
     NBLOCKS = p.nblocks;
     NSTEPS = p.nsteps;
     REDEFINE = redefineVars;
+#if ADIOS2_USE_MPI
+    snprintf(FILENAME, sizeof(FILENAME), "manyVars.%zu_%zu_%zu%s_MPI.bp", NVARS, NBLOCKS,
+             NSTEPS, REDEFINE ? "_redefine" : "");
+#else
     snprintf(FILENAME, sizeof(FILENAME), "manyVars.%zu_%zu_%zu%s.bp", NVARS, NBLOCKS, NSTEPS,
"_redefine" : ""); +#endif alloc_vars(); #if ADIOS2_USE_MPI diff --git a/testing/adios2/performance/query/TestBPQuery.cpp b/testing/adios2/performance/query/TestBPQuery.cpp index 6e0e918a10..7ded0040ec 100644 --- a/testing/adios2/performance/query/TestBPQuery.cpp +++ b/testing/adios2/performance/query/TestBPQuery.cpp @@ -81,7 +81,11 @@ class BPQueryTest : public ::testing::Test void BPQueryTest::QueryIntVar(const std::string &fname, adios2::ADIOS &adios, const std::string &engineName) { +#if ADIOS2_USE_MPI + std::string ioName = "IOQueryTestInt_MPI" + engineName; +#else std::string ioName = "IOQueryTestInt" + engineName; +#endif adios2::IO io = adios.DeclareIO(ioName.c_str()); if (!engineName.empty()) @@ -235,12 +239,13 @@ TEST_F(BPQueryTest, BP5) std::string engineName = "BP5"; // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname(engineName + "Query1D.bp"); #if ADIOS2_USE_MPI adios2::ADIOS adios(MPI_COMM_WORLD); + const std::string fname(engineName + "Query1D_MPI.bp"); #else adios2::ADIOS adios; + const std::string fname(engineName + "Query1D.bp"); #endif WriteFile(fname, adios, engineName); diff --git a/testing/h5vol/TestH5VolWriteReadBPFile.cpp b/testing/h5vol/TestH5VolWriteReadBPFile.cpp index 5fdd23b679..ddc202f3f5 100644 --- a/testing/h5vol/TestH5VolWriteReadBPFile.cpp +++ b/testing/h5vol/TestH5VolWriteReadBPFile.cpp @@ -462,8 +462,12 @@ TEST_F(H5VolWriteReadTest, H5VolWriteHDF5Read1D8) { // Each process would write a 1x8 array and all processes would // form a mpiSize * Nx 1D array - const std::string fname = "H5VolTest1D8.bp"; +#ifdef TEST_HDF5_MPI + const std::string fname = "H5VolTest1D8_MPI.bp"; +#else + const std::string fname = "H5VolTest1D8.bp"; +#endif int mpiRank = 0, mpiSize = 1; // Number of rows const std::size_t Nx = 8; @@ -649,8 +653,11 @@ TEST_F(H5VolWriteReadTest, H5VolWriteHDF5Read2D2x4) { // Each process would write a 2x4 array and all processes would // form a 2D 2 * (numberOfProcess*Nx) matrix where Nx is 4 here +#ifdef TEST_HDF5_MPI + std::string fname = "H5VolTest2D2x4_MPI.bp"; +#else std::string fname = "H5VolTest2D2x4.bp"; - +#endif int mpiRank = 0, mpiSize = 1; // Number of rows const std::size_t Nx = 4;