ci: find branchs in chrono order #3666

Closed
wants to merge 45 commits from vicentebolea/ci-crusher-tweaks
Changes from all commits (45 commits)
eff1b1b
BP5: fixes memory error with IBM XL
vicentebolea May 10, 2023
f1cedf3
Merge pull request #3619 from vicentebolea/backports-bp5-bugfix
vicentebolea May 29, 2023
457d8e2
Merge pull request #3623 from vicentebolea/kokkos-use-nvcc-wrapper
vicentebolea May 17, 2023
72e430c
ci,ascent: enable ascent builds
vicentebolea May 5, 2023
6bbf433
ci,olcf,crusher: enable Crusher CI
vicentebolea May 12, 2023
55fe563
ci,crusher: minor tweaks
vicentebolea May 25, 2023
86b792f
docs: update whatsnew
vicentebolea May 25, 2023
4e640c1
Bump requests from 2.28.1 to 2.31.0 in /docs
dependabot[bot] May 23, 2023
be4fcbc
CI,windows: change MSMPI URL
vicentebolea May 22, 2023
bc04804
fixed https://github.com/ornladios/ADIOS2/issues/3638
guj May 26, 2023
c55cf75
crusher,ci: set unique env per pipeline
vicentebolea May 29, 2023
3bd4cff
Merge pull request #3643 from vicentebolea/backports-from-master
vicentebolea May 30, 2023
32eb32b
Allow Span in files opened for Append
eisenhauer Jun 8, 2023
7bd34be
Merge pull request #3657 from eisenhauer/SpanAppend
eisenhauer Jun 8, 2023
b395e8f
ci: find branchs in chrono order
vicentebolea Jun 30, 2023
6277f97
Fix: std::min w/ windows.h in C-Blosc2
ax3l Jul 1, 2023
12c4319
Merge pull request #3681 from ax3l/fix-blosc2-windows-min
eisenhauer Jul 2, 2023
e0f9dc0
enet 2023-06-08 (f93beb4e)
Jun 8, 2023
74eebf7
Merge branch 'upstream-enet' into BP5
eisenhauer Jul 2, 2023
298e38a
EVPath 2023-06-10 (e9be8e63)
Jun 10, 2023
9c455f6
Merge branch 'upstream-EVPath' into BP5
eisenhauer Jul 2, 2023
6535ef9
atl 2023-06-07 (f3c23577)
Jun 7, 2023
86c328a
Merge branch 'upstream-atl' into BP5
eisenhauer Jul 2, 2023
1e45542
ffs 2023-06-06 (46d79432)
Jun 6, 2023
324febb
Merge branch 'upstream-ffs' into BP5
eisenhauer Jul 2, 2023
20afb0e
dill 2023-06-09 (6c94efb3)
Jun 9, 2023
5023da9
Merge branch 'upstream-dill' into BP5
eisenhauer Jul 2, 2023
8ea624a
Tweaks for BP5 on Windows
eisenhauer Jul 2, 2023
e08dba0
String fix and timeout
eisenhauer Jul 3, 2023
9785b9a
Merge pull request #3682 from eisenhauer/BP5
eisenhauer Jul 3, 2023
06ca9cf
Merge branch 'release_29'
vicentebolea Jul 3, 2023
537405d
Merge pull request #3681 from ax3l/fix-blosc2-windows-min
eisenhauer Jul 2, 2023
1b99c8c
Merge release_29
vicentebolea Jul 3, 2023
6917bf9
Fix memory leak when there are Joined Arrays in streaming mode
eisenhauer May 10, 2023
68b282c
Merge pull request #3684 from eisenhauer/FixLeak
eisenhauer Jul 3, 2023
defc1c7
Merge pull request #3673 from vicentebolea/disable-ascent-install-test
vicentebolea Jun 29, 2023
43ae62c
Merge pull request #3650 from ornladios/dependabot/pip/docs/cryptogra…
vicentebolea Jun 14, 2023
af34a40
Merge pull request #3652 from guj/unistd-windows
guj Jun 5, 2023
ce73176
Merge pull request #3684 from eisenhauer/FixLeak
eisenhauer Jul 3, 2023
5e1f9f5
Merge pull request #3685 from vicentebolea/backports_release_29
vicentebolea Jul 4, 2023
ec7d094
Merge branch 'release_29'
vicentebolea Jul 4, 2023
55dc9b2
Merge pull request #3657 from eisenhauer/SpanAppend
eisenhauer Jun 8, 2023
9318869
Fix for the issue #3646. It looks like the default for macos and pyth…
dmitry-ganyushin Jul 4, 2023
c8c5238
Merge pull request #3687 from dmitry-ganyushin/i3646-mult-macos
eisenhauer Jul 4, 2023
b8b4ac0
Merge pull request #3666 from vicentebolea/ci-crusher-tweaks
vicentebolea Jul 5, 2023
94 changes: 53 additions & 41 deletions .gitlab/config/generate_pipelines.py
@@ -5,11 +5,11 @@
#
# generate_pipeline.py
#
# Created: May 19, 2023
# Author: Vicente Adolfo Bolea Sanchez <vicente.bolea@kitware.com>

from datetime import datetime
import argparse
import itertools
import requests
import time
import re
@@ -18,18 +18,43 @@
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def is_date_after(date, days):
    deadline_sec = int(time.time()) - (days * 86400)
    utc_dt = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
    timestamp_sec = (utc_dt - datetime(1970, 1, 1)).total_seconds()
    return timestamp_sec > deadline_sec


def request_dict(url):
def request_as_dict(url):
    r = requests.get(url + '?per_page=100', verify=False)
    return r.json()


def add_timestamp(branch):
    date_str = branch['commit']['committed_date']
    # We ignore the TZ since Gitlab/GitHub always reports in UTC
    branch['dt'] = int(
        datetime.strptime(date_str.split(".")[0],
                          '%Y-%m-%dT%H:%M:%S').timestamp())
    return branch


def is_recent(branch):
    deadline_sec = int(time.time()) - (args.days * 86400)
    return branch['dt'] > deadline_sec


def has_no_status(branch):
    gh_commit_sha = branch['commit']['id']
    # Backported branches use the merge head
    if re.fullmatch(r'^pr\d+_.*$', branch['name']):
        gh_commit_sha = branch['commit']['parent_ids'][1]

    # Query GitHub for the status of this commit
    commit = request_as_dict(gh_url + '/commits/' + gh_commit_sha + '/status')
    if commit is None or 'sha' not in commit:
        return False

    for status in commit['statuses']:
        if status['context'] == args.gh_context:
            return False

    return True


parser = argparse.ArgumentParser(
    prog='generate_pipeline.py',
    description='Generate Dynamic pipelines for Gitlab')
@@ -39,12 +64,12 @@ def request_dict(url):
parser.add_argument(
    '-n', '--gh-name', required=True,
    help='Full name of the GitHub project. Ex: ornladios/ADIOS2')
parser.add_argument(
    '-c', '--gh-context', default='OLCF Crusher (Frontier)',
    help='Name of the status in GitHub (A.K.A context)')
parser.add_argument(
    '-p', '--project_id', required=True,
    help='Gitlab internal project ID of the project.')
parser.add_argument(
    '-c', '--gh-context', default='OLCF Crusher (Frontier)',
    help='Name of the status in GitHub (A.K.A context)')
parser.add_argument(
    '-d', '--days', type=int, default=1,
    help='How many days back to search for commits')
@@ -57,34 +82,21 @@ def request_dict(url):
args = parser.parse_args()


with open(args.template_file, "r") as fd:
gl_url = args.gl_url + '/api/v4/projects/' + str(args.project_id)
gh_url = 'https://api.github.com/repos/' + args.gh_name

with open(args.template_file, 'r') as fd:
    template_str = fd.read()
gl_url = args.gl_url + "/api/v4/projects/" + str(args.project_id)
gh_url = 'https://api.github.com/repos/' + args.gh_name
branches = request_dict(gl_url + "/repository/branches")
num_pipeline = 0

branches = request_as_dict(gl_url + '/repository/branches')
branches = map(add_timestamp, branches)
branches = filter(is_recent, branches)
branches = filter(has_no_status, branches)

# Select at most args.max of the least recent branches
branches = sorted(branches, key=lambda x: x['dt'])
branches = itertools.islice(branches, args.max)

for branch in branches:
    # Convert to ISO 8601 date format.
    date_stamp = branch['commit']['committed_date'].split('.')[0] + "Z"
    if num_pipeline < args.max and is_date_after(date_stamp, args.days):
        commit_sha = branch['commit']['id']
        # Backported branches use the merge head
        gh_commit_sha = commit_sha
        if re.fullmatch(r'^pr\d+_.*$', branch['name']):
            gh_commit_sha = branch['commit']['parent_ids'][1]

        # Quit if GitHub does not have the commit
        if 'sha' not in request_dict(gh_url + "/commits/" + gh_commit_sha):
            continue

        # Query GitHub for the status of this commit
        commit = request_dict(gh_url + "/commits/" +
                              gh_commit_sha + "/status")
        status_found = False
        for status in commit['statuses']:
            if status['context'] == args.gh_context:
                status_found = True
        if not status_found:
            num_pipeline += 1
            print(template_str.format(
                branch=branch['name'], commit=commit_sha))
    print(template_str.format(
        branch=branch['name'], commit=branch['commit']['id']))
2 changes: 1 addition & 1 deletion cmake/DetectOptions.cmake
@@ -451,7 +451,7 @@ if(DAOS_FOUND)
endif()

# BP5
if(ADIOS2_USE_BP5 AND NOT WIN32)
if(ADIOS2_USE_BP5)
set(ADIOS2_HAVE_BP5 TRUE)
endif()

5 changes: 3 additions & 2 deletions source/adios2/core/Engine.tcc
@@ -27,8 +27,9 @@ template <class T>
typename Variable<T>::Span &Engine::Put(Variable<T> &variable,
const bool initialize, const T &value)
{
CheckOpenModes({{Mode::Write}}, " for variable " + variable.m_Name +
", in call to Variable<T>::Span Put");
CheckOpenModes({{Mode::Write, Mode::Append}},
" for variable " + variable.m_Name +
", in call to Variable<T>::Span Put");
if (!variable.m_Operations.empty())
{
helper::Throw<std::invalid_argument>(
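The hunk above widens the open-mode check so that the Span overload of Put is accepted for engines opened in Append mode as well as Write mode, matching the "Allow Span in files opened for Append" commit. A minimal sketch of how that overload is exercised through the C++11 bindings follows; the IO name, file name, and variable shape are illustrative assumptions, not part of this PR.

#include <adios2.h>
#include <numeric>

int main()
{
    adios2::ADIOS adios;
    adios2::IO io = adios.DeclareIO("SpanAppendDemo"); // hypothetical IO name
    auto var = io.DefineVariable<double>("v", {100}, {0}, {100});

    // Before this change, the Span form of Put was rejected unless the
    // engine had been opened with Mode::Write.
    adios2::Engine engine = io.Open("demo.bp", adios2::Mode::Append);

    engine.BeginStep();
    // Obtain a Span into the engine's buffer and fill it in place instead
    // of copying from a user-owned array.
    adios2::Variable<double>::Span span = engine.Put(var);
    std::iota(span.begin(), span.end(), 0.0);
    engine.EndStep();
    engine.Close();
    return 0;
}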
4 changes: 4 additions & 0 deletions source/adios2/core/Variable.tcc
@@ -113,9 +113,13 @@ std::pair<T, T> Variable<T>::DoMinMax(const size_t step) const
MinMaxStruct MM;
if (m_Engine->VariableMinMax(*this, step, MM))
{
if (std::is_same<T, std::string>::value) {
return minMax;
} else {
minMax.first = *(T *)&MM.MinUnion;
minMax.second = *(T *)&MM.MaxUnion;
return minMax;
}
}
}
if (m_Engine != nullptr && !m_FirstStreamingStep)
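The added branch keeps DoMinMax from reinterpreting the engine's numeric min/max union as a std::string, which would be undefined behavior; for string variables the default-constructed pair is returned instead. Below is a standalone sketch of the same guard. The union and struct are stand-ins invented for illustration, not the ADIOS2 MinMaxStruct, and if constexpr is used here purely for brevity where the diff uses a runtime check.

#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>

// Stand-in for a numeric min/max union as used by the engine internals.
union NumericValue
{
    int64_t i64;
    double f64;
};

struct FakeMinMax
{
    NumericValue MinUnion;
    NumericValue MaxUnion;
};

template <class T>
std::pair<T, T> MinMaxFrom(const FakeMinMax &MM)
{
    std::pair<T, T> minMax;
    // Only numeric T can be read out of the union; for std::string we fall
    // through and return the default-constructed pair, mirroring the guard
    // added in the diff above.
    if constexpr (!std::is_same_v<T, std::string>)
    {
        minMax.first = *reinterpret_cast<const T *>(&MM.MinUnion);
        minMax.second = *reinterpret_cast<const T *>(&MM.MaxUnion);
    }
    return minMax;
}

int main()
{
    FakeMinMax mm;
    mm.MinUnion.f64 = -1.5;
    mm.MaxUnion.f64 = 2.5;
    auto dminmax = MinMaxFrom<double>(mm);      // {-1.5, 2.5}
    auto sminmax = MinMaxFrom<std::string>(mm); // {"", ""}
    return (dminmax.first < dminmax.second && sminmax.first.empty()) ? 0 : 1;
}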
12 changes: 6 additions & 6 deletions source/adios2/engine/bp5/BP5Reader.cpp
@@ -368,7 +368,7 @@ void BP5Reader::PerformGets()
// then main thread process the last subset
for (size_t tid = 0; tid < nThreads - 1; ++tid)
{
futures[tid] = std::async(std::launch::async, lf_Reader, tid + 1,
futures[tid] = std::async(std::launch::async, lf_Reader, (int)(tid + 1),
maxOpenFiles);
}
// main thread runs last subset of reads
@@ -513,9 +513,9 @@ void BP5Reader::InitParameters()
}

size_t limit = helper::RaiseLimitNoFile();
if (m_Parameters.MaxOpenFilesAtOnce > limit - 8)
if (m_Parameters.MaxOpenFilesAtOnce > (unsigned int) limit - 8)
{
m_Parameters.MaxOpenFilesAtOnce = limit - 8;
m_Parameters.MaxOpenFilesAtOnce = (unsigned int) limit - 8;
}
}

@@ -986,11 +986,11 @@ size_t BP5Reader::ParseMetadataIndex(format::BufferSTL &bufferSTL,
{
auto p = m_WriterMap.emplace(m_StepsCount, WriterMapStruct());
auto &s = p.first->second;
s.WriterCount = helper::ReadValue<uint64_t>(
s.WriterCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, m_Minifooter.IsLittleEndian);
s.AggregatorCount = helper::ReadValue<uint64_t>(
s.AggregatorCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, m_Minifooter.IsLittleEndian);
s.SubfileCount = helper::ReadValue<uint64_t>(
s.SubfileCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, m_Minifooter.IsLittleEndian);
// Get the process -> subfile map
s.RankToSubfile.reserve(s.WriterCount);
54 changes: 31 additions & 23 deletions source/adios2/engine/bp5/BP5Writer.cpp
@@ -160,6 +160,7 @@ void BP5Writer::WriteMetaMetadata(
m_FileMetaMetadataManager.WriteFiles((char *)b.MetaMetaInfo,
b.MetaMetaInfoLen);
}
m_FileMetaMetadataManager.FlushFiles();
}

uint64_t
@@ -206,6 +207,8 @@ BP5Writer::WriteMetadata(const std::vector<core::iovec> &MetaDataBlocks,
MetaDataSize += b.iov_len;
}

m_FileMetadataManager.FlushFiles();

m_MetaDataPos += MetaDataSize;
return MetaDataSize;
}
@@ -272,6 +275,7 @@ void BP5Writer::WriteData(format::BufferV *Data)
std::to_string(m_Parameters.AggregationType) +
"is not supported in BP5");
}
m_FileDataManager.FlushFiles();
delete Data;
}
}
@@ -337,8 +341,6 @@ void BP5Writer::WriteData_EveryoneWrites(format::BufferV *Data,
void BP5Writer::WriteMetadataFileIndex(uint64_t MetaDataPos,
uint64_t MetaDataSize)
{
m_FileMetadataManager.FlushFiles();

// bufsize: Step record
size_t bufsize =
1 + (4 + ((FlushPosSizeInfo.size() * 2) + 1) * m_Comm.Size()) *
@@ -407,7 +409,6 @@ void BP5Writer::WriteMetadataFileIndex(uint64_t MetaDataPos,
}

m_FileMetadataIndexManager.WriteFiles((char *)buf.data(), buf.size());

#ifdef DUMPDATALOCINFO
std::cout << "Flush count is :" << FlushPosSizeInfo.size() << std::endl;
std::cout << "Write Index positions = {" << std::endl;
@@ -427,6 +428,8 @@ void BP5Writer::WriteMetadataFileIndex(uint64_t MetaDataPos,
}
std::cout << "}" << std::endl;
#endif
m_FileMetadataIndexManager.FlushFiles();

/* reset for next timestep */
FlushPosSizeInfo.clear();
}
@@ -472,7 +475,7 @@ void BP5Writer::MarshalAttributes()

if (!attributePair.second->m_IsSingleValue)
{
element_count = (*baseAttr)->m_Elements;
element_count = (int)(*baseAttr)->m_Elements;
}

if (type == DataType::None)
@@ -511,7 +514,7 @@ void BP5Writer::MarshalAttributes()
void *data_addr = &attribute.m_DataSingleValue; \
if (!attribute.m_IsSingleValue) \
{ \
element_count = attribute.m_Elements; \
element_count = (int)attribute.m_Elements; \
data_addr = attribute.m_DataArray.data(); \
} \
m_BP5Serializer.MarshalAttribute(attribute.m_Name.c_str(), type, \
@@ -536,7 +539,7 @@ void BP5Writer::EndStep()

// true: advances step
auto TSInfo = m_BP5Serializer.CloseTimestep(
m_WriterStep, m_Parameters.AsyncWrite || m_Parameters.DirectIO);
(int)m_WriterStep, m_Parameters.AsyncWrite || m_Parameters.DirectIO);

/* TSInfo includes NewMetaMetaBlocks, the MetaEncodeBuffer, the
* AttributeEncodeBuffer and the data encode Vector */
@@ -684,6 +687,10 @@ void BP5Writer::EndStep()
m_AsyncWriteLock.unlock();
}
}
m_FileMetadataIndexManager.FlushFiles();
m_FileMetadataManager.FlushFiles();
m_FileMetaMetadataManager.FlushFiles();
m_FileDataManager.FlushFiles();

m_Profiler.Stop("ES");
m_WriterStep++;
@@ -752,7 +759,8 @@ void BP5Writer::InitParameters()
{
size_t k =
m_Parameters.StripeSize / m_Parameters.DirectIOAlignOffset + 1;
m_Parameters.StripeSize = k * m_Parameters.DirectIOAlignOffset;
m_Parameters.StripeSize =
(unsigned int)(k * m_Parameters.DirectIOAlignOffset);
}
if (m_Parameters.BufferChunkSize % m_Parameters.DirectIOAlignOffset)
{
@@ -852,12 +860,12 @@ uint64_t BP5Writer::CountStepsInMetadataIndex(format::BufferSTL &bufferSTL)
{
case IndexRecord::WriterMapRecord:
{
m_AppendWriterCount =
helper::ReadValue<uint64_t>(buffer, position, IsLittleEndian);
m_AppendAggregatorCount =
helper::ReadValue<uint64_t>(buffer, position, IsLittleEndian);
m_AppendSubfileCount =
helper::ReadValue<uint64_t>(buffer, position, IsLittleEndian);
m_AppendWriterCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, IsLittleEndian);
m_AppendAggregatorCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, IsLittleEndian);
m_AppendSubfileCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, IsLittleEndian);
if (m_AppendSubfileCount > nDataFiles)
{
nDataFiles = m_AppendSubfileCount;
@@ -939,12 +947,12 @@ uint64_t BP5Writer::CountStepsInMetadataIndex(format::BufferSTL &bufferSTL)
{
case IndexRecord::WriterMapRecord:
{
m_AppendWriterCount =
helper::ReadValue<uint64_t>(buffer, position, IsLittleEndian);
m_AppendAggregatorCount =
helper::ReadValue<uint64_t>(buffer, position, IsLittleEndian);
m_AppendSubfileCount =
helper::ReadValue<uint64_t>(buffer, position, IsLittleEndian);
m_AppendWriterCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, IsLittleEndian);
m_AppendAggregatorCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, IsLittleEndian);
m_AppendSubfileCount = (uint32_t)helper::ReadValue<uint64_t>(
buffer, position, IsLittleEndian);

// Get the process -> subfile map
writerToFileMap.clear();
@@ -1796,8 +1804,8 @@ void BP5Writer::PutCommon(VariableBase &variable, const void *values, bool sync)
helper::DimsArray MemoryCount(variable.m_MemoryCount);
helper::DimsArray varCount(variable.m_Count);

int DimCount = variable.m_Count.size();
std::vector<size_t> ZeroDims(DimCount);
int DimCount = (int)variable.m_Count.size();
helper::DimsArray ZeroDims(DimCount, (size_t)0);
// get a temporary span then fill with memselection now
format::BufferV::BufferPos bp5span(0, 0, 0);

@@ -1816,8 +1824,8 @@ void BP5Writer::PutCommon(VariableBase &variable, const void *values, bool sync)
}
helper::NdCopy((const char *)values, helper::CoreDims(ZeroDims),
MemoryCount, sourceRowMajor, false, (char *)ptr,
MemoryStart, varCount, sourceRowMajor, false, ObjSize,
helper::CoreDims(), helper::CoreDims(),
MemoryStart, varCount, sourceRowMajor, false,
(int)ObjSize, helper::CoreDims(), helper::CoreDims(),
helper::CoreDims(), helper::CoreDims(),
false /* safemode */, variable.m_MemSpace);
}
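Among the BP5Writer.cpp changes, the InitParameters() hunk rounds StripeSize up to the next multiple of DirectIOAlignOffset and now casts the result back to unsigned int explicitly. The helper below is a small standalone sketch of that round-up logic; the function name and the numeric values are illustrative assumptions, not ADIOS2 code.

#include <cstddef>
#include <cstdio>

// Round size up to the next multiple of align, as the InitParameters()
// hunk does when StripeSize is not already aligned.
static unsigned int RoundUpToAlignment(std::size_t size, std::size_t align)
{
    if (size % align)
    {
        const std::size_t k = size / align + 1;
        size = k * align;
    }
    return static_cast<unsigned int>(size);
}

int main()
{
    // Illustrative values: a stripe of 1 MiB + 1 byte and 4 KiB alignment.
    std::printf("%u\n", RoundUpToAlignment(1048577, 4096)); // prints 1052672
    return 0;
}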
@@ -88,7 +88,7 @@ void BP5Writer::AsyncWriteOwnData(AsyncWriteInfo *info,
size_t wrote = 0;
size_t block = 0;
size_t temp_offset = 0;
size_t max_size = std::max(1024 * 1024UL, totalsize / 100UL);
size_t max_size = std::max((size_t)1024 * 1024UL, totalsize / 100UL);

bool firstWrite = seekOnFirstWrite;
while (block < nBlocks)
8 changes: 4 additions & 4 deletions source/adios2/operator/compress/CompressBlosc.cpp
@@ -181,8 +181,8 @@ size_t CompressBlosc::Operate(const char *dataIn, const Dims &blockStart,
for (; inputOffset < sizeIn; ++chunk)
{
size_t inputChunkSize =
std::min(sizeIn - inputOffset,
static_cast<size_t>(BLOSC2_MAX_BUFFERSIZE));
std::min<size_t>(sizeIn - inputOffset,
static_cast<size_t>(BLOSC2_MAX_BUFFERSIZE));
bloscSize_t maxIntputSize =
static_cast<bloscSize_t>(inputChunkSize);

@@ -375,8 +375,8 @@ size_t CompressBlosc::DecompressChunkedFormat(const char *bufferIn,
char *out_ptr = dataOut + currentOutputSize;

size_t outputChunkSize =
std::min(uncompressedSize - currentOutputSize,
static_cast<size_t>(BLOSC2_MAX_BUFFERSIZE));
std::min<size_t>(uncompressedSize - currentOutputSize,
static_cast<size_t>(BLOSC2_MAX_BUFFERSIZE));
bloscSize_t max_output_size =
static_cast<bloscSize_t>(outputChunkSize);

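These two CompressBlosc hunks pass the template argument to std::min explicitly, while the std::max change in the async write path above casts one argument to size_t; both appear aimed at Windows-specific build failures, where mismatched integer types can break template argument deduction and windows.h defines function-like min/max macros. The snippet below is a standalone illustration of the explicit-template-argument pattern; the macro definition and the values are assumptions made for the demo, not ADIOS2 or Blosc code.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Stand-in for the macro that windows.h defines when NOMINMAX is not set.
#define min(a, b) (((a) < (b)) ? (a) : (b))

int main()
{
    std::size_t remaining = 5000000;
    const std::size_t kMaxChunk = 1 << 20; // stand-in for BLOSC2_MAX_BUFFERSIZE

    // std::min(remaining, kMaxChunk) would not compile here: `min` is
    // followed by '(' and the function-like macro expands into nonsense.
    // With an explicit template argument, `min` is followed by '<', the
    // macro is left alone, and the real std::min<size_t> is called.
    const std::size_t chunk = std::min<std::size_t>(remaining, kMaxChunk);

    std::printf("%zu\n", chunk); // prints 1048576
    return 0;
}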