Skip to content

Commit

Permalink
Merge branch 'master' into mjmac/DAOS-14850
Browse files Browse the repository at this point in the history
Change-Id: Ib1685ba48ec4abf382b7dd5e4e41ff36605c94c3
Required-githooks: true
  • Loading branch information
mjmac committed Jan 10, 2024
2 parents b7c5504 + 1ec01fb commit fb8c169
Show file tree
Hide file tree
Showing 59 changed files with 2,084 additions and 761 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/pylint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,16 @@ jobs:
uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install core python packages
run: python3 -m pip install --requirement requirements.txt
- name: Install extra python packages
run: python3 -m pip install --requirement utils/cq/requirements.txt
- name: Install enchant
run: sudo apt-get update && sudo apt-get -y install python3-enchant
- name: Show versions
run: ./utils/cq/daos_pylint.py --version
- name: Run pylint check.
run: ./utils/cq/daos_pylint.py --git --output-format github
146 changes: 73 additions & 73 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -1040,78 +1040,78 @@ pipeline {
}
}
} // stage('Fault injection testing on EL 8.8')
// stage('Test RPMs on EL 8.6') {
// when {
// beforeAgent true
// expression { ! skipStage() }
// }
// agent {
// label params.CI_UNIT_VM1_LABEL
// }
// steps {
// job_step_update(
// testRpm(inst_repos: daosRepos(),
// daos_pkg_version: daosPackagesVersion(next_version))
// )
// }
// post {
// always {
// rpm_test_post(env.STAGE_NAME, env.NODELIST)
// }
// }
// } // stage('Test CentOS 7 RPMs')
// stage('Test RPMs on Leap 15.4') {
// when {
// beforeAgent true
// expression { ! skipStage() }
// }
// agent {
// label params.CI_UNIT_VM1_LABEL
// }
// steps {
// /* neither of these work as FTest strips the first node
// out of the pool requiring 2 node clusters at minimum
// * additionally for this use-case, can't override
// ftest_arg with this :-(
// script {
// 'Test RPMs on Leap 15.4': getFunctionalTestStage(
// name: 'Test RPMs on Leap 15.4',
// pragma_suffix: '',
// label: params.CI_UNIT_VM1_LABEL,
// next_version: next_version,
// stage_tags: '',
// default_tags: 'test_daos_management',
// nvme: 'auto',
// run_if_pr: true,
// run_if_landing: true,
// job_status: job_status_internal
// )
// }
// job_step_update(
// functionalTest(
// test_tag: 'test_daos_management',
// ftest_arg: '--yaml_extension single_host',
// inst_repos: daosRepos(),
// inst_rpms: functionalPackages(1, next_version, 'tests-internal'),
// test_function: 'runTestFunctionalV2'))
// }
// post {
// always {
// functionalTestPostV2()
// job_status_update()
// }
// } */
// job_step_update(
// testRpm(inst_repos: daosRepos(),
// daos_pkg_version: daosPackagesVersion(next_version))
// )
// }
// post {
// always {
// rpm_test_post(env.STAGE_NAME, env.NODELIST)
// }
// }
// } // stage('Test Leap 15 RPMs')
stage('Test RPMs on EL 8.6') {
when {
beforeAgent true
expression { ! skipStage() }
}
agent {
label params.CI_UNIT_VM1_LABEL
}
steps {
job_step_update(
testRpm(inst_repos: daosRepos(),
daos_pkg_version: daosPackagesVersion(next_version))
)
}
post {
always {
rpm_test_post(env.STAGE_NAME, env.NODELIST)
}
}
} // stage('Test CentOS 7 RPMs')
stage('Test RPMs on Leap 15.4') {
when {
beforeAgent true
expression { ! skipStage() }
}
agent {
label params.CI_UNIT_VM1_LABEL
}
steps {
/* neither of these work as FTest strips the first node
out of the pool requiring 2 node clusters at minimum
* additionally for this use-case, can't override
ftest_arg with this :-(
script {
'Test RPMs on Leap 15.4': getFunctionalTestStage(
name: 'Test RPMs on Leap 15.4',
pragma_suffix: '',
label: params.CI_UNIT_VM1_LABEL,
next_version: next_version,
stage_tags: '',
default_tags: 'test_daos_management',
nvme: 'auto',
run_if_pr: true,
run_if_landing: true,
job_status: job_status_internal
)
}
job_step_update(
functionalTest(
test_tag: 'test_daos_management',
ftest_arg: '--yaml_extension single_host',
inst_repos: daosRepos(),
inst_rpms: functionalPackages(1, next_version, 'tests-internal'),
test_function: 'runTestFunctionalV2'))
}
post {
always {
functionalTestPostV2()
job_status_update()
}
} */
job_step_update(
testRpm(inst_repos: daosRepos(),
daos_pkg_version: daosPackagesVersion(next_version))
)
}
post {
always {
rpm_test_post(env.STAGE_NAME, env.NODELIST)
}
}
} // stage('Test Leap 15 RPMs')
} // parallel
} // stage('Test')
stage('Test Storage Prep on EL 8.8') {
Expand Down Expand Up @@ -1202,7 +1202,7 @@ pipeline {
stage_tags: 'hw,medium,provider',
default_tags: startedByTimer() ? 'pr daily_regression' : 'pr',
default_nvme: 'auto',
provider: 'ucx+dc_x',
provider: cachedCommitPragma('Test-provider-ucx', 'ucx+ud_x'),
run_if_pr: false,
run_if_landing: false,
job_status: job_status_internal
Expand Down
12 changes: 6 additions & 6 deletions src/bio/bio_xstream.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/**
* (C) Copyright 2018-2023 Intel Corporation.
* (C) Copyright 2018-2024 Intel Corporation.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*/
Expand Down Expand Up @@ -196,8 +196,8 @@ set_faulty_criteria(void)
glb_criteria.fc_max_csum_errs = UINT32_MAX;

d_getenv_bool("DAOS_NVME_AUTO_FAULTY_ENABLED", &glb_criteria.fc_enabled);
d_getenv_int("DAOS_NVME_AUTO_FAULTY_IO", &glb_criteria.fc_max_io_errs);
d_getenv_int("DAOS_NVME_AUTO_FAULTY_CSUM", &glb_criteria.fc_max_csum_errs);
d_getenv_uint32_t("DAOS_NVME_AUTO_FAULTY_IO", &glb_criteria.fc_max_io_errs);
d_getenv_uint32_t("DAOS_NVME_AUTO_FAULTY_CSUM", &glb_criteria.fc_max_csum_errs);

D_INFO("NVMe auto faulty is %s. Criteria: max_io_errs:%u, max_csum_errs:%u\n",
glb_criteria.fc_enabled ? "enabled" : "disabled",
Expand Down Expand Up @@ -249,15 +249,15 @@ bio_nvme_init(const char *nvme_conf, int numa_node, unsigned int mem_size,
d_getenv_bool("DAOS_SCM_RDMA_ENABLED", &bio_scm_rdma);
D_INFO("RDMA to SCM is %s\n", bio_scm_rdma ? "enabled" : "disabled");

d_getenv_int("DAOS_SPDK_SUBSYS_TIMEOUT", &bio_spdk_subsys_timeout);
d_getenv_uint("DAOS_SPDK_SUBSYS_TIMEOUT", &bio_spdk_subsys_timeout);
D_INFO("SPDK subsystem fini timeout is %u ms\n", bio_spdk_subsys_timeout);

d_getenv_int("DAOS_SPDK_MAX_UNMAP_CNT", &bio_spdk_max_unmap_cnt);
d_getenv_uint("DAOS_SPDK_MAX_UNMAP_CNT", &bio_spdk_max_unmap_cnt);
if (bio_spdk_max_unmap_cnt == 0)
bio_spdk_max_unmap_cnt = UINT32_MAX;
D_INFO("SPDK batch blob unmap call count is %u\n", bio_spdk_max_unmap_cnt);

d_getenv_int("DAOS_MAX_ASYNC_SZ", &bio_max_async_sz);
d_getenv_uint("DAOS_MAX_ASYNC_SZ", &bio_max_async_sz);
D_INFO("Max async data size is set to %u bytes\n", bio_max_async_sz);

/* Hugepages disabled */
Expand Down
50 changes: 18 additions & 32 deletions src/cart/crt_init.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* (C) Copyright 2016-2023 Intel Corporation.
* (C) Copyright 2016-2024 Intel Corporation.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*/
Expand Down Expand Up @@ -192,7 +192,7 @@ prov_data_init(struct crt_prov_gdata *prov_data, crt_provider_t provider,

/* Set max number of contexts. Defaults to the number of cores */
ctx_num = 0;
d_getenv_int("CRT_CTX_NUM", &ctx_num);
d_getenv_uint("CRT_CTX_NUM", &ctx_num);
if (opt)
max_num_ctx = ctx_num ? ctx_num : max(crt_gdata.cg_num_cores, opt->cio_ctx_max_num);
else
Expand Down Expand Up @@ -221,7 +221,7 @@ prov_data_init(struct crt_prov_gdata *prov_data, crt_provider_t provider,
if (share_addr) {
set_sep = true;
ctx_num = 0;
d_getenv_int("CRT_CTX_NUM", &ctx_num);
d_getenv_uint("CRT_CTX_NUM", &ctx_num);
max_num_ctx = ctx_num;
}
}
Expand Down Expand Up @@ -279,30 +279,30 @@ static int data_init(int server, crt_init_options_t *opt)
crt_gdata.cg_rpcid, crt_gdata.cg_num_cores);

/* Set context post init / post incr to tune number of pre-posted recvs */
d_getenv_int("D_POST_INIT", &post_init);
d_getenv_uint32_t("D_POST_INIT", &post_init);
crt_gdata.cg_post_init = post_init;
d_getenv_int("D_POST_INCR", &post_incr);
d_getenv_uint32_t("D_POST_INCR", &post_incr);
crt_gdata.cg_post_incr = post_incr;

is_secondary = 0;
/* Apply CART-890 workaround for server side only */
if (server) {
d_getenv_int("CRT_ENABLE_MEM_PIN", &mem_pin_enable);
d_getenv_uint("CRT_ENABLE_MEM_PIN", &mem_pin_enable);
if (mem_pin_enable == 1)
mem_pin_workaround();
} else {
/*
* Client-side envariable to indicate that the cluster
* is running using a secondary provider
*/
d_getenv_int("CRT_SECONDARY_PROVIDER", &is_secondary);
d_getenv_uint("CRT_SECONDARY_PROVIDER", &is_secondary);
}
crt_gdata.cg_provider_is_primary = (is_secondary) ? 0 : 1;

if (opt && opt->cio_crt_timeout != 0)
timeout = opt->cio_crt_timeout;
else
d_getenv_int("CRT_TIMEOUT", &timeout);
d_getenv_uint("CRT_TIMEOUT", &timeout);

if (timeout == 0 || timeout > 3600)
crt_gdata.cg_timeout = CRT_DEFAULT_TIMEOUT_S;
Expand All @@ -321,13 +321,13 @@ static int data_init(int server, crt_init_options_t *opt)
credits = opt->cio_ep_credits;
} else {
credits = CRT_DEFAULT_CREDITS_PER_EP_CTX;
d_getenv_int("CRT_CREDIT_EP_CTX", &credits);
d_getenv_uint("CRT_CREDIT_EP_CTX", &credits);
}

/* Enable quotas by default only on clients */
crt_gdata.cg_rpc_quota = server ? 0 : CRT_QUOTA_RPCS_DEFAULT;

d_getenv_int("D_QUOTA_RPCS", &crt_gdata.cg_rpc_quota);
d_getenv_uint("D_QUOTA_RPCS", &crt_gdata.cg_rpc_quota);

/* Must be set on the server when using UCX, will not affect OFI */
d_getenv_char("UCX_IB_FORK_INIT", &ucx_ib_fork_init);
Expand All @@ -339,13 +339,13 @@ static int data_init(int server, crt_init_options_t *opt)
}
}
if (server)
setenv("UCX_IB_FORK_INIT", "n", 1);
d_setenv("UCX_IB_FORK_INIT", "n", 1);

/* This is a workaround for CART-871 if universe size is not set */
d_getenv_int("FI_UNIVERSE_SIZE", &fi_univ_size);
d_getenv_uint("FI_UNIVERSE_SIZE", &fi_univ_size);
if (fi_univ_size == 0) {
D_INFO("FI_UNIVERSE_SIZE was not set; setting to 2048\n");
setenv("FI_UNIVERSE_SIZE", "2048", 1);
d_setenv("FI_UNIVERSE_SIZE", "2048", 1);
}

if (credits == 0) {
Expand Down Expand Up @@ -536,19 +536,6 @@ check_grpid(crt_group_id_t grpid)
return rc;
}

static void
apply_if_not_set(const char *env_name, const char *new_value)
{
char *old_val;

old_val = getenv(env_name);

if (old_val == NULL) {
D_INFO("%s not set, setting to %s\n", env_name, new_value);
setenv(env_name, new_value, true);
}
}

static void
prov_settings_apply(bool primary, crt_provider_t prov, crt_init_options_t *opt)
{
Expand All @@ -569,26 +556,25 @@ prov_settings_apply(bool primary, crt_provider_t prov, crt_init_options_t *opt)
if (prov == CRT_PROV_OFI_VERBS_RXM ||
prov == CRT_PROV_OFI_TCP_RXM) {
/* Use shared receive queues to avoid large mem consumption */
apply_if_not_set("FI_OFI_RXM_USE_SRX", "1");
d_setenv("FI_OFI_RXM_USE_SRX", "1", 0);

/* Only apply on the server side */
if (prov == CRT_PROV_OFI_TCP_RXM && crt_is_service())
apply_if_not_set("FI_OFI_RXM_DEF_TCP_WAIT_OBJ", "pollfd");

d_setenv("FI_OFI_RXM_DEF_TCP_WAIT_OBJ", "pollfd", 0);
}

if (prov == CRT_PROV_OFI_CXI)
mrc_enable = 1;

d_getenv_int("CRT_MRC_ENABLE", &mrc_enable);
d_getenv_uint("CRT_MRC_ENABLE", &mrc_enable);
if (mrc_enable == 0) {
D_INFO("Disabling MR CACHE (FI_MR_CACHE_MAX_COUNT=0)\n");
setenv("FI_MR_CACHE_MAX_COUNT", "0", 1);
d_setenv("FI_MR_CACHE_MAX_COUNT", "0", 1);
}

/* Use tagged messages for other providers, disable multi-recv */
if (prov != CRT_PROV_OFI_CXI && prov != CRT_PROV_OFI_TCP)
apply_if_not_set("NA_OFI_UNEXPECTED_TAG_MSG", "1");
d_setenv("NA_OFI_UNEXPECTED_TAG_MSG", "1", 0);

g_prov_settings_applied[prov] = true;
}
Expand Down
7 changes: 0 additions & 7 deletions src/cart/crt_iv.c
Original file line number Diff line number Diff line change
Expand Up @@ -1695,10 +1695,6 @@ crt_iv_fetch(crt_iv_namespace_t ivns, uint32_t class_id,

/* The fetch info is contained on current server. */
if (rc == 0) {
/* Finish up the completion call back */
iv_ops->ivo_on_refresh(ivns_internal, iv_key, 0,
iv_value, false, 0x0, user_priv);

fetch_comp_cb(ivns_internal, class_id, iv_key, NULL,
iv_value, rc, cb_arg);

Expand All @@ -1710,9 +1706,6 @@ crt_iv_fetch(crt_iv_namespace_t ivns, uint32_t class_id,
return rc;
} else if (rc != -DER_IVCB_FORWARD) {
/* We got error, call the callback and exit */
iv_ops->ivo_on_refresh(ivns_internal, iv_key, 0,
NULL, false, rc, user_priv);

fetch_comp_cb(ivns_internal, class_id, iv_key, NULL,
NULL, rc, cb_arg);

Expand Down
Loading

0 comments on commit fb8c169

Please sign in to comment.