From 6600adb3cd6f9963bb4f4bffb2d9a1c952b94e01 Mon Sep 17 00:00:00 2001 From: frankist Date: Wed, 20 Nov 2024 13:08:50 +0100 Subject: [PATCH 1/4] sched: clean checks for UE allocation opportunities in time_rr --- lib/scheduler/policy/scheduler_time_rr.cpp | 56 ++++++++++--------- .../ue_scheduling/ue_cell_grid_allocator.cpp | 6 +- 2 files changed, 32 insertions(+), 30 deletions(-) diff --git a/lib/scheduler/policy/scheduler_time_rr.cpp b/lib/scheduler/policy/scheduler_time_rr.cpp index 0352b35d09..41d3ba3196 100644 --- a/lib/scheduler/policy/scheduler_time_rr.cpp +++ b/lib/scheduler/policy/scheduler_time_rr.cpp @@ -141,9 +141,21 @@ static unsigned compute_max_nof_rbs_per_ue_per_slot(const slice_ue_repository& return (std::min(bwp_crb_limits.length(), slice_max_rbs) / nof_ues_to_be_scheduled_per_slot); } -static bool can_allocate_dl_newtx(const slice_ue& ue_ref, ue_cell_index_t cell_index, srslog::basic_logger& logger) +static bool can_allocate_dl_newtx(const ue_resource_grid_view& res_grid, + const ue_cell& ue_cc, + slot_point pdsch_slot, + srslog::basic_logger& logger) { - const ue_cell& ue_cc = ue_ref.get_cell(cell_index); + srsran_assert(ue_cc.is_active() and not ue_cc.is_in_fallback_mode(), + "policy scheduler called for UE={} in fallback", + ue_cc.ue_index); + + if (res_grid.has_ue_dl_pdcch(ue_cc.cell_index, ue_cc.rnti()) or + not ue_cc.is_pdcch_enabled(res_grid.get_pdcch_slot(ue_cc.cell_index)) or not ue_cc.is_pdsch_enabled(pdsch_slot)) { + // UE is either already allocated for this slot (e.g. a reTx already took place) or it is not active. + return false; + } + if (not ue_cc.harqs.has_empty_dl_harqs()) { // No empty HARQs are available. Log this occurrence. 
if (ue_cc.harqs.find_pending_dl_retx().has_value()) { @@ -166,9 +178,21 @@ static bool can_allocate_dl_newtx(const slice_ue& ue_ref, ue_cell_index_t cell_i return true; } -static bool can_allocate_ul_newtx(const slice_ue& ue_ref, ue_cell_index_t cell_index, srslog::basic_logger& logger) +static bool can_allocate_ul_newtx(const slice_ue& ue_ref, + const ue_cell& ue_cc, + slot_point pdcch_slot, + slot_point pusch_slot, + srslog::basic_logger& logger) { - const ue_cell& ue_cc = ue_ref.get_cell(cell_index); + srsran_assert(ue_cc.is_active() and not ue_cc.is_in_fallback_mode(), + "policy scheduler called for UE={} in fallback", + ue_cc.ue_index); + + if (not ue_cc.is_pdcch_enabled(pdcch_slot) or not ue_cc.is_ul_enabled(pusch_slot)) { + // Either the PDCCH slot or PUSCH slots are not available. + return false; + } + if (not ue_cc.harqs.has_empty_ul_harqs()) { // No empty HARQs are available. Log this occurrence. if (ue_cc.harqs.find_pending_ul_retx().has_value()) { @@ -287,18 +311,8 @@ static dl_alloc_result alloc_dl_ue_newtx(const slice_ue& u, // Prioritize PCell over SCells. for (unsigned i = 0; i != u.nof_cells(); ++i) { const ue_cell& ue_cc = u.get_cell(to_ue_cell_index(i)); - srsran_assert(ue_cc.is_active() and not ue_cc.is_in_fallback_mode(), - "policy scheduler called for UE={} in fallback", - ue_cc.ue_index); - - if (res_grid.has_ue_dl_pdcch(ue_cc.cell_index, u.crnti()) or - not ue_cc.is_pdcch_enabled(res_grid.get_pdcch_slot(ue_cc.cell_index)) or - not ue_cc.is_pdsch_enabled(slice_candidate.get_slot_tx())) { - // UE is either already allocated for this slot (e.g. a reTx already took place) or it is not active. 
- return {alloc_status::skip_ue}; - } - if (can_allocate_dl_newtx(u, to_ue_cell_index(i), logger)) { + if (can_allocate_dl_newtx(res_grid, ue_cc, slice_candidate.get_slot_tx(), logger)) { ue_pdsch_grant grant{&u, ue_cc.cell_index, INVALID_HARQ_ID, u.pending_dl_newtx_bytes(), max_pdsch_rbs}; const dl_alloc_result result = pdsch_alloc.allocate_dl_grant(grant); // If the allocation failed due to invalid parameters, we continue iteration. @@ -372,17 +386,9 @@ static ul_alloc_result alloc_ul_ue_newtx(const slice_ue& u, // Prioritize PCell over SCells. for (unsigned i = 0; i != u.nof_cells(); ++i) { const ue_cell& ue_cc = u.get_cell(to_ue_cell_index(i)); - srsran_assert(ue_cc.is_active() and not ue_cc.is_in_fallback_mode(), - "policy scheduler called for UE={} in fallback", - ue_cc.ue_index); - - if (not ue_cc.is_pdcch_enabled(res_grid.get_pdcch_slot(ue_cc.cell_index)) or - not ue_cc.is_ul_enabled(slice_candidate.get_slot_tx())) { - // Either the PDCCH slot or PUSCH slots are not available. - continue; - } - if (can_allocate_ul_newtx(u, to_ue_cell_index(i), logger)) { + if (can_allocate_ul_newtx( + u, ue_cc, res_grid.get_pdcch_slot(ue_cc.cell_index), slice_candidate.get_slot_tx(), logger)) { ue_pusch_grant grant{&u, ue_cc.cell_index, INVALID_HARQ_ID, pending_newtx_bytes, max_grant_rbs}; const ul_alloc_result result = pusch_alloc.allocate_ul_grant(grant); // If the allocation failed due to invalid parameters, we continue iteration. 
diff --git a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp index 9419d0ac62..408e514bc9 100644 --- a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp +++ b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp @@ -513,11 +513,7 @@ ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice grant.cell_index); return {alloc_status::skip_ue}; } - - if (ue_cc->is_in_fallback_mode()) { - // Skip allocation for UEs in fallback mode, as it is handled by the SRB fallback scheduler. - return {alloc_status::skip_ue}; - } + srsran_assert(not ue_cc->is_in_fallback_mode(), "Invalid UE candidate"); const ue_cell_configuration& ue_cell_cfg = ue_cc->cfg(); const cell_configuration& cell_cfg = ue_cell_cfg.cell_cfg_common; From 2f3989f826e9d33abd944d768a0bd834c13990e9 Mon Sep 17 00:00:00 2001 From: frankist Date: Wed, 20 Nov 2024 13:26:42 +0100 Subject: [PATCH 2/4] sched: add RACH in scheduler logger --- lib/scheduler/logging/scheduler_result_logger.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/scheduler/logging/scheduler_result_logger.cpp b/lib/scheduler/logging/scheduler_result_logger.cpp index 52262b50d9..564a3708c2 100644 --- a/lib/scheduler/logging/scheduler_result_logger.cpp +++ b/lib/scheduler/logging/scheduler_result_logger.cpp @@ -302,6 +302,17 @@ void scheduler_result_logger::log_debug(const sched_result& result, std::chrono: srs.sequence_id); } + if (log_broadcast) { + for (const prach_occasion_info& prach : result.ul.prachs) { + fmt::format_to(fmtbuf, + "\n- PRACH: pci={} format={} nof_occasions={} nof_preambles={}", + prach.pci, + to_string(prach.format), + prach.nof_prach_occasions, + prach.nof_preamble_indexes); + } + } + if (fmtbuf.size() > 0) { const unsigned nof_pdschs = result.dl.paging_grants.size() + result.dl.rar_grants.size() + result.dl.ue_grants.size() + result.dl.bc.sibs.size(); From 818544f456ddc78fa1b1535cede44aaafb1f5ee3 Mon Sep 17 
00:00:00 2001 From: frankist Date: Wed, 20 Nov 2024 19:13:10 +0100 Subject: [PATCH 3/4] sched: extend last PUSCH allocation when the scheduler runs out of PDCCH --- lib/scheduler/ue_context/ue_cell.h | 3 +- .../ue_scheduling/ue_cell_grid_allocator.cpp | 303 ++++++++++++++---- .../ue_scheduling/ue_cell_grid_allocator.h | 5 +- .../ue_scheduling/ue_scheduler_impl.cpp | 2 + 4 files changed, 243 insertions(+), 70 deletions(-) diff --git a/lib/scheduler/ue_context/ue_cell.h b/lib/scheduler/ue_context/ue_cell.h index 5d7fe55ba2..c7e6c5b689 100644 --- a/lib/scheduler/ue_context/ue_cell.h +++ b/lib/scheduler/ue_context/ue_cell.h @@ -147,7 +147,8 @@ class ue_cell const ue_link_adaptation_controller& link_adaptation_controller() const { return ue_mcs_calculator; } - ul_power_controller& get_ul_power_controller() { return ul_pwr_controller; } + ul_power_controller& get_ul_power_controller() { return ul_pwr_controller; } + const ul_power_controller& get_ul_power_controller() const { return ul_pwr_controller; } /// \brief Returns an estimated DL rate in bytes per slot based on the given input parameters. 
double get_estimated_dl_rate(const pdsch_config_params& pdsch_cfg, sch_mcs_index mcs, unsigned nof_prbs) const; diff --git a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp index 408e514bc9..e50ee1709b 100644 --- a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp +++ b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp @@ -490,6 +490,40 @@ dl_alloc_result ue_cell_grid_allocator::allocate_dl_grant(const ue_pdsch_grant& return {alloc_status::invalid_params}; } +static crb_interval get_ul_rb_limits(const scheduler_ue_expert_config& expert_cfg, const search_space_info& ss_info) +{ + const unsigned start_rb = std::max(expert_cfg.pusch_crb_limits.start(), ss_info.ul_crb_lims.start()); + const unsigned end_rb = std::min(expert_cfg.pusch_crb_limits.stop(), ss_info.ul_crb_lims.stop()); + return {start_rb, std::max(start_rb, end_rb)}; +} + +static unsigned +adjust_nof_rbs_to_transform_precoding(unsigned rbs, const ue_cell& ue_cc, dci_ul_rnti_config_type dci_type) +{ + // Ensure the number of PRB is valid if the transform precoder is used. The condition the PUSCH bandwidth with + // transform precoder is defined in TS 38.211 Section 6.1.3. The number of PRB must be lower than or equal to + // current number of PRB. + bool use_transform_precoding = dci_type == dci_ul_rnti_config_type::c_rnti_f0_1 + ? 
ue_cc.cfg().use_pusch_transform_precoding_dci_0_1() + : ue_cc.cfg().cell_cfg_common.use_msg3_transform_precoder(); + if (use_transform_precoding) { + rbs = get_transform_precoding_nearest_lower_nof_prb_valid(rbs).value_or(rbs); + } + return rbs; +} + +static unsigned adjust_ue_max_ul_nof_rbs(const scheduler_ue_expert_config& expert_cfg, + const ue_cell& ue_cc, + dci_ul_rnti_config_type dci_type, + unsigned max_rbs) +{ + max_rbs = std::min(expert_cfg.pusch_nof_rbs.stop(), max_rbs); + max_rbs = ue_cc.get_ul_power_controller().adapt_pusch_prbs_to_phr(max_rbs); + max_rbs = adjust_nof_rbs_to_transform_precoding(max_rbs, ue_cc, dci_type); + + return max_rbs; +} + ul_alloc_result ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice_id_t slice_id, slot_point pusch_slot) { @@ -607,28 +641,6 @@ ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice return {alloc_status::skip_slot}; } - // When checking the number of remaining grants for PUSCH, take into account that the PUCCH grants for this UE will - // be removed when multiplexing the UCI on PUSCH. - unsigned pusch_pdu_rem_space = get_space_left_for_pusch_pdus(pusch_alloc.result, u.crnti, expert_cfg); - if (pusch_pdu_rem_space == 0) { - if (pusch_alloc.result.ul.puschs.size() >= expert_cfg.max_puschs_per_slot) { - logger.info( - "ue={} rnti={}: Failed to allocate PUSCH in slot={}. Cause: Max number of PUSCHs per slot {} was reached.", - u.ue_index, - u.crnti, - pusch_alloc.slot, - expert_cfg.max_puschs_per_slot); - } else { - logger.info("ue={} rnti={}: Failed to allocate PUSCH in slot={}. Cause: Max number of UL grants per slot {} " - "was reached.", - u.ue_index, - u.crnti, - pusch_alloc.slot, - expert_cfg.max_puschs_per_slot); - } - return {alloc_status::skip_slot}; - } - // [Implementation-defined] We skip allocation of PUSCH if there is already a PUCCH grant scheduled using common // PUCCH resources. 
if (get_uci_alloc(grant.cell_index).has_uci_harq_on_common_pucch_res(u.crnti, pusch_alloc.slot)) { @@ -654,77 +666,69 @@ ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice } // Apply RB allocation limits. - const unsigned start_rb = std::max(expert_cfg.pusch_crb_limits.start(), ss_info.ul_crb_lims.start()); - const unsigned end_rb = std::min(expert_cfg.pusch_crb_limits.stop(), ss_info.ul_crb_lims.stop()); - if (start_rb >= end_rb) { - logger.debug("ue={} rnti={}: Failed to allocate PUSCH in slot={}. Cause: Invalid RB allocation range [{}, {})", - u.ue_index, - u.crnti, - pusch_alloc.slot, - start_rb, - end_rb); + crb_interval ul_crb_lims = get_ul_rb_limits(expert_cfg, ss_info); + if (ul_crb_lims.empty()) { + logger.info("ue={} rnti={}: Failed to allocate PUSCH in slot={}. Cause: Invalid RB allocation range [{}, {})", + u.ue_index, + u.crnti, + pusch_alloc.slot, + ul_crb_lims.start(), + ul_crb_lims.stop()); return {alloc_status::skip_slot}; } - const crb_interval ul_crb_lims = {start_rb, end_rb}; - const prb_bitmap used_crbs = pusch_alloc.ul_res_grid.used_crbs(scs, ul_crb_lims, pusch_td_cfg.symbols); + const prb_bitmap used_crbs = pusch_alloc.ul_res_grid.used_crbs(scs, ul_crb_lims, pusch_td_cfg.symbols); if (used_crbs.all()) { slots_with_no_pusch_space.push_back(pusch_alloc.slot); return {alloc_status::skip_slot}; } // Compute the MCS and the number of PRBs, depending on the pending bytes to transmit. - grant_prbs_mcs mcs_prbs = - is_retx ? grant_prbs_mcs{h_ul->get_grant_params().mcs, h_ul->get_grant_params().rbs.type1().length()} - : ue_cc->required_ul_prbs(pusch_td_cfg, grant.recommended_nof_bytes.value(), dci_type); - // Try to limit the grant PRBs. - if (not is_retx) { - // [Implementation-defined] Check whether max. UL grants per slot is reached if PUSCH for current UE succeeds. If - // so, allocate remaining RBs to the current UE only if it's a new Tx. 
- if (pusch_pdu_rem_space == 1 and not u.has_pending_sr()) { - mcs_prbs.n_prbs = rb_helper::find_empty_interval_of_length(used_crbs, used_crbs.size(), 0).length(); - } + grant_prbs_mcs mcs_prbs; + if (is_retx) { + mcs_prbs = grant_prbs_mcs{h_ul->get_grant_params().mcs, h_ul->get_grant_params().rbs.type1().length()}; + } else { + // Compute MCS and PRBs based on grant parameters. + mcs_prbs = ue_cc->required_ul_prbs(pusch_td_cfg, grant.recommended_nof_bytes.value(), dci_type); + + // Apply minimum RB limit per grant. + mcs_prbs.n_prbs = std::max(mcs_prbs.n_prbs, expert_cfg.pusch_nof_rbs.start()); + // Due to the pre-allocated UCI bits, MCS 0 and PRB 1 would not leave any space for the payload on the TBS, as // all the space would be taken by the UCI bits. As a result of this, the effective code rate would be 0 and the // allocation would fail and be postponed to the next slot. // [Implementation-defined] In our tests, we have seen that MCS 5 with 1 PRB can lead (depending on the // configuration) to a non-valid MCS-PRB allocation; therefore, we set 6 as minimum value for 1 PRB. // TODO: Remove this part and handle the problem with a loop that is general for any configuration. - const sch_mcs_index min_mcs_for_1_prb = static_cast(6U); - const unsigned min_allocable_prbs = 1U; - if (mcs_prbs.mcs < min_mcs_for_1_prb and mcs_prbs.n_prbs <= min_allocable_prbs) { - ++mcs_prbs.n_prbs; - } - // [Implementation-defined] - // Check whether to allocate all remaining RBs or not. This is done to ensure we allocate only X nof. UEs per slot - // and not X+1 nof. UEs. One way is by checking if the emtpy interval is less than 2 times the required RBs. If - // so, allocate all remaining RBs. NOTE: This approach won't hold good in case of low traffic scenario. 
- const unsigned twice_grant_crbs_length = - rb_helper::find_empty_interval_of_length(used_crbs, mcs_prbs.n_prbs * 2, 0).length(); - if (twice_grant_crbs_length < (mcs_prbs.n_prbs * 2)) { - mcs_prbs.n_prbs = twice_grant_crbs_length; + const unsigned min_allocable_prbs = 1U; + if (mcs_prbs.n_prbs <= min_allocable_prbs) { + const sch_mcs_index min_mcs_for_1_prb = static_cast(6U); + if (mcs_prbs.mcs < min_mcs_for_1_prb) { + ++mcs_prbs.n_prbs; + } } + // Limit nof. RBs to allocate to maximum RBs provided in grant. if (grant.max_nof_rbs.has_value()) { mcs_prbs.n_prbs = std::min(mcs_prbs.n_prbs, grant.max_nof_rbs.value()); } - // Re-apply nof. PUSCH RBs to allocate limits. - mcs_prbs.n_prbs = std::max(mcs_prbs.n_prbs, expert_cfg.pusch_nof_rbs.start()); - mcs_prbs.n_prbs = std::min(mcs_prbs.n_prbs, expert_cfg.pusch_nof_rbs.stop()); - // Ensure the number of PRB is valid if the transform precoder is used. The condition the PUSCH bandwidth with - // transform precoder is defined in TS 38.211 Section 6.1.3. The number of PRB must be lower than or equal to - // current number of PRB. - if ((dci_type == dci_ul_rnti_config_type::c_rnti_f0_1) - ? ue_cell_cfg.use_pusch_transform_precoding_dci_0_1() - : ue_cell_cfg.cell_cfg_common.use_msg3_transform_precoder()) { - mcs_prbs.n_prbs = - get_transform_precoding_nearest_lower_nof_prb_valid(mcs_prbs.n_prbs).value_or(mcs_prbs.n_prbs); + + // [Implementation-defined] + // Sometimes just a few 1-4 RBs are left in the grid, and the scheduler policy will try to fit a tiny PUSCH in it. + // We want to avoid this, by ensuring this grant fills the remaining RBs. + unsigned nof_rbs_left = (~used_crbs).count(); + nof_rbs_left -= std::min(nof_rbs_left, mcs_prbs.n_prbs); + if (nof_rbs_left > 0 and nof_rbs_left < 5) { + mcs_prbs.n_prbs += nof_rbs_left; } + + // Re-apply nof. PUSCH RBs to allocate limits. 
+ mcs_prbs.n_prbs = adjust_ue_max_ul_nof_rbs(expert_cfg, *ue_cc, dci_type, mcs_prbs.n_prbs); } // NOTE: This should never happen, but it's safe not to proceed if we get n_prbs == 0. if (mcs_prbs.n_prbs == 0) { - logger.debug( + logger.info( "ue={} rnti={}: Failed to allocate PUSCH in slot={}. Cause: MCS and PRBs computation resulted in no PRBs " "allocated to this UE", u.ue_index, @@ -1016,3 +1020,166 @@ ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice // No candidates for PUSCH allocation. return {alloc_status::invalid_params}; } + +void ue_cell_grid_allocator::post_process_results() +{ + for (const cell_t& cell : cells) { + auto& pdcch_alloc = get_res_alloc(cell.cell_index)[0]; + + // In case PUSCHs allocations have been made, we try to ensure that RBs are not left empty. + auto& ul_pdcchs = pdcch_alloc.result.dl.ul_pdcchs; + if (ul_pdcchs.empty()) { + continue; + } + + bounded_bitset traversed_k2s(SCHEDULER_MAX_K2); + for (auto& pdcch : ul_pdcchs) { + if (pdcch.dci.type != dci_ul_rnti_config_type::c_rnti_f0_1) { + continue; + } + const auto& u = *ues.find_by_rnti(pdcch.ctx.rnti); + const ue_cell& ue_cell = *u.find_cell(cell.cell_index); + const uint8_t pusch_td_idx = pdcch.dci.c_rnti_f0_1.time_resource; + const search_space_info& ss = ue_cell.cfg().search_space(pdcch.ctx.context.ss_id); + const uint8_t k2 = ss.pusch_time_domain_list[pusch_td_idx].k2; + if (not traversed_k2s.test(k2 - 1)) { + post_process_ul_results(cell.cell_index, pdcch_alloc.slot + k2); + traversed_k2s.set(k2 - 1); + } + } + } +} + +void ue_cell_grid_allocator::post_process_ul_results(du_cell_index_t cell_idx, slot_point pusch_slot) +{ + // Note: For now, we just expand the last allocation to fill empty RBs. + // TODO: Fairer algorithm to fill remaining RBs that accounts for the UE buffer states as well. 
+ + const cell_t& cell = cells[cell_idx]; + cell_resource_allocator& cell_alloc = get_res_alloc(cell.cell_index); + auto& pusch_alloc = cell_alloc[pusch_slot]; + if (pusch_alloc.result.ul.puschs.empty()) { + // There are no UL allocations for this slot. Move on to next cell. + return; + } + const cell_configuration& cell_cfg = cell_alloc.cfg; + const subcarrier_spacing scs = cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.scs; + + // Use last PUSCH to get reference UE config for this candidate. + ul_sched_info& last_pusch = pusch_alloc.result.ul.puschs.back(); + if (not last_pusch.pusch_cfg.new_data) { + // It is a retx. We cannot resize it. + return; + } + if (last_pusch.pusch_cfg.rbs.is_type0()) { + // Not supported yet. + return; + } + + const vrb_interval& vrbs = last_pusch.pusch_cfg.rbs.type1(); + if (vrbs.length() >= expert_cfg.pusch_nof_rbs.length()) { + // The last UE reached max grant size. + return; + } + + const ue& ue_ref = ues[last_pusch.context.ue_index]; + const ue_cell& ue_cc = *ue_ref.find_cell(cell_cfg.cell_index); + + if (ue_ref.pending_ul_newtx_bytes() == 0) { + // No point in expanding UE grant if it has no more bytes to transmit. + return; + } + + const search_space_info& ss_info = ue_cc.cfg().search_space(last_pusch.context.ss_id); + const crb_interval ul_crb_lims = get_ul_rb_limits(expert_cfg, ss_info); + + const prb_bitmap used_crbs = pusch_alloc.ul_res_grid.used_crbs(scs, ul_crb_lims, last_pusch.pusch_cfg.symbols); + if (used_crbs.all()) { + // All CRBs were filled. + return; + } + + std::optional h_ul = ue_cc.harqs.ul_harq(to_harq_id(last_pusch.pusch_cfg.harq_id)); + if (not h_ul.has_value()) { + logger.error("Could not find HARQ id for existing PUSCH"); + return; + } + const dci_ul_rnti_config_type dci_type = h_ul->get_grant_params().dci_cfg_type; + if (dci_type != dci_ul_rnti_config_type::c_rnti_f0_1) { + // Only expansion for C-RNTI f0_1 supported. + return; + } + + // Check if there is a gap at the right of the last UE. 
+ const bwp_configuration& active_bwp = *last_pusch.pusch_cfg.bwp_cfg; + crb_interval crbs = rb_helper::vrb_to_crb_ul_non_interleaved(vrbs, active_bwp.crbs.start()); + // Account for limits in number of RBs that can be allocated. + const unsigned max_crbs = adjust_ue_max_ul_nof_rbs(expert_cfg, ue_cc, dci_type, active_bwp.crbs.stop() - crbs.stop()); + + crb_interval empty_crbs = rb_helper::find_empty_interval_of_length(used_crbs, max_crbs, crbs.stop()); + if (empty_crbs.empty()) { + // Could not extend existing PUSCH. + return; + } + // There are RBs empty to the right of the last allocation. Let's expand the allocation. + + crb_interval new_crbs{crbs.start(), empty_crbs.stop()}; + new_crbs.resize(adjust_nof_rbs_to_transform_precoding(new_crbs.length(), ue_cc, dci_type)); + + if (new_crbs.length() <= crbs.length()) { + // There is no growth possible. + return; + } + + // Find respective PDCCH. + auto& pdcch_alloc = cell_alloc[pusch_slot - last_pusch.context.k2]; + auto it = + std::find_if(pdcch_alloc.result.dl.ul_pdcchs.begin(), + pdcch_alloc.result.dl.ul_pdcchs.end(), + [&](const pdcch_ul_information& pdcch) { return pdcch.ctx.rnti == last_pusch.pusch_cfg.rnti; }); + if (it == pdcch_alloc.result.dl.ul_pdcchs.end()) { + logger.error( + "rnti={}: Cannot find PDCCH associated with the given PUSCH at slot={}", last_pusch.pusch_cfg.rnti, pusch_slot); + return; + } + pdcch_ul_information& pdcch = *it; + + vrb_interval new_vrbs = rb_helper::crb_to_vrb_ul_non_interleaved(new_crbs, active_bwp.crbs.start()); + + // Mark resources as occupied in the ResourceGrid. + empty_crbs = {crbs.stop(), new_crbs.stop()}; + pusch_alloc.ul_res_grid.fill(grant_info{scs, last_pusch.pusch_cfg.symbols, empty_crbs}); + + // Recompute MCS and TBS. 
+ const search_space_info& ss = ue_cc.cfg().search_space(pdcch.ctx.context.ss_id); + const auto& pusch_td_cfg = ss.pusch_time_domain_list[pdcch.dci.c_rnti_f0_1.time_resource]; + const unsigned nof_harq_bits = + last_pusch.uci.has_value() and last_pusch.uci->harq.has_value() ? last_pusch.uci->harq->harq_ack_nof_bits : 0; + const unsigned is_csi_rep = last_pusch.uci.has_value() and last_pusch.uci->csi.has_value() + ? last_pusch.uci->csi.value().csi_part1_nof_bits > 0 + : 0; + auto pusch_cfg = get_pusch_config_f0_1_c_rnti( + ue_cc.cfg(), pusch_td_cfg, last_pusch.pusch_cfg.nof_layers, nof_harq_bits, is_csi_rep); + bool contains_dc = + dc_offset_helper::is_contained(cell_cfg.expert_cfg.ue.initial_ul_dc_offset, cell_cfg.nof_ul_prbs, crbs); + std::optional mcs_tbs_info = + compute_ul_mcs_tbs(pusch_cfg, &ue_cc.cfg(), last_pusch.pusch_cfg.mcs_index, new_crbs.length(), contains_dc); + if (not mcs_tbs_info.has_value()) { + return; + } + + // Update DCI. Note: the RIV must encode the expanded allocation (new_vrbs), not the old one; at this point + // "vrbs" still references the pre-expansion interval, and the PUSCH TBS below is computed for new_crbs. + pdcch.dci.c_rnti_f0_1.frequency_resource = ra_frequency_type1_get_riv( + ra_frequency_type1_configuration{active_bwp.crbs.length(), new_vrbs.start(), new_vrbs.length()}); + + // Update PUSCH. + last_pusch.pusch_cfg.rbs = new_vrbs; + last_pusch.pusch_cfg.tb_size_bytes = mcs_tbs_info->tbs; + + // Update HARQ. 
+ ul_harq_alloc_context pusch_sched_ctx; + pusch_sched_ctx.dci_cfg_type = h_ul->get_grant_params().dci_cfg_type; + pusch_sched_ctx.olla_mcs = h_ul->get_grant_params().olla_mcs; + pusch_sched_ctx.slice_id = h_ul->get_grant_params().slice_id; + h_ul->save_grant_params(pusch_sched_ctx, last_pusch.pusch_cfg); +} diff --git a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.h b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.h index 50efcf7cc1..c9d83680f2 100644 --- a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.h +++ b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.h @@ -13,7 +13,6 @@ #include "../pdcch_scheduling/pdcch_resource_allocator.h" #include "../policy/ue_allocator.h" #include "../slicing/ran_slice_candidate.h" -#include "../uci_scheduling/uci_scheduler.h" #include "ue_repository.h" #include "srsran/scheduler/config/scheduler_expert_config.h" @@ -42,6 +41,8 @@ class ue_cell_grid_allocator ul_alloc_result allocate_ul_grant(const ue_pusch_grant& grant, ran_slice_id_t slice_id, slot_point pusch_slot); + void post_process_results(); + private: struct cell_t { du_cell_index_t cell_index; @@ -62,6 +63,8 @@ class ue_cell_grid_allocator return *cells[cell_index].cell_alloc; } + void post_process_ul_results(du_cell_index_t cell_idx, slot_point pusch_slot); + const scheduler_ue_expert_config& expert_cfg; ue_repository& ues; diff --git a/lib/scheduler/ue_scheduling/ue_scheduler_impl.cpp b/lib/scheduler/ue_scheduling/ue_scheduler_impl.cpp index 2c28eeceb9..5b14ba848b 100644 --- a/lib/scheduler/ue_scheduling/ue_scheduler_impl.cpp +++ b/lib/scheduler/ue_scheduling/ue_scheduler_impl.cpp @@ -75,6 +75,8 @@ void ue_scheduler_impl::run_sched_strategy(slot_point slot_tx, du_cell_index_t c slice_pusch_alloc, ue_res_grid_view, *ul_slice_candidate, cells[cell_index].cell_harqs.pending_ul_retxs()); ul_slice_candidate = cells[cell_index].slice_sched.get_next_ul_candidate(); } + + ue_alloc.post_process_results(); } void 
ue_scheduler_impl::update_harq_pucch_counter(cell_resource_allocator& cell_alloc) From a7131308a770339389e54f6540592755047575e8 Mon Sep 17 00:00:00 2001 From: frankist Date: Wed, 20 Nov 2024 21:59:18 +0100 Subject: [PATCH 4/4] sched: fix failing ue scheduler test --- lib/scheduler/policy/ue_allocator.h | 1 - .../ue_scheduling/ue_cell_grid_allocator.cpp | 6 +- .../ue_scheduling/ue_grid_allocator_test.cpp | 226 ++++++++---------- 3 files changed, 108 insertions(+), 125 deletions(-) diff --git a/lib/scheduler/policy/ue_allocator.h b/lib/scheduler/policy/ue_allocator.h index 1da40e18c1..bf78c51bbd 100644 --- a/lib/scheduler/policy/ue_allocator.h +++ b/lib/scheduler/policy/ue_allocator.h @@ -15,7 +15,6 @@ #include "../ue_context/ue.h" #include "../ue_scheduling/ue_repository.h" #include "../ue_scheduling/ue_scheduler.h" -#include "srsran/ran/slot_point.h" namespace srsran { diff --git a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp index e50ee1709b..955eb45187 100644 --- a/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp +++ b/lib/scheduler/ue_scheduling/ue_cell_grid_allocator.cpp @@ -691,9 +691,6 @@ ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice // Compute MCS and PRBs based on grant parameters. mcs_prbs = ue_cc->required_ul_prbs(pusch_td_cfg, grant.recommended_nof_bytes.value(), dci_type); - // Apply minimum RB limit per grant. - mcs_prbs.n_prbs = std::max(mcs_prbs.n_prbs, expert_cfg.pusch_nof_rbs.start()); - // Due to the pre-allocated UCI bits, MCS 0 and PRB 1 would not leave any space for the payload on the TBS, as // all the space would be taken by the UCI bits. As a result of this, the effective code rate would be 0 and the // allocation would fail and be postponed to the next slot. @@ -722,6 +719,9 @@ ue_cell_grid_allocator::allocate_ul_grant(const ue_pusch_grant& grant, ran_slice mcs_prbs.n_prbs += nof_rbs_left; } + // Apply minimum RB limit per grant. 
+ mcs_prbs.n_prbs = std::max(mcs_prbs.n_prbs, expert_cfg.pusch_nof_rbs.start()); + // Re-apply nof. PUSCH RBs to allocate limits. mcs_prbs.n_prbs = adjust_ue_max_ul_nof_rbs(expert_cfg, *ue_cc, dci_type, mcs_prbs.n_prbs); } diff --git a/tests/unittests/scheduler/ue_scheduling/ue_grid_allocator_test.cpp b/tests/unittests/scheduler/ue_scheduling/ue_grid_allocator_test.cpp index e3d4d2a4c8..88e584412b 100644 --- a/tests/unittests/scheduler/ue_scheduling/ue_grid_allocator_test.cpp +++ b/tests/unittests/scheduler/ue_scheduling/ue_grid_allocator_test.cpp @@ -51,10 +51,7 @@ class ue_grid_allocator_tester : public ::testing::TestWithParam srslog::init(); // Initialize resource grid. - res_grid.slot_indication(current_slot); - pdcch_alloc.slot_indication(current_slot); - pucch_alloc.slot_indication(current_slot); - uci_alloc.slot_indication(current_slot); + slot_indication(); alloc.add_cell(to_du_cell_index(0), pdcch_alloc, uci_alloc, res_grid); } @@ -68,7 +65,7 @@ class ue_grid_allocator_tester : public ::testing::TestWithParam return next_slot; } - void run_slot() + void slot_indication(std::function on_each_slot = []() {}) { ++current_slot; logger.set_context(current_slot.sfn(), current_slot.slot_index()); @@ -80,17 +77,24 @@ class ue_grid_allocator_tester : public ::testing::TestWithParam uci_alloc.slot_indication(current_slot); ues.slot_indication(current_slot); + on_each_slot(); + + alloc.post_process_results(); + // Log scheduler results. 
res_logger.on_scheduler_result(res_grid[0].result); } - bool run_until(unique_function condition, unsigned max_slot_count = 1000) + bool run_until(std::function to_run, unique_function until, unsigned max_slot_count = 1000) { + if (until()) { + return true; + } for (unsigned count = 0; count != max_slot_count; ++count) { - if (condition()) { + slot_indication(to_run); + if (until()) { return true; } - run_slot(); } return false; } @@ -178,9 +182,8 @@ TEST_P(ue_grid_allocator_tester, .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); ASSERT_TRUE(crb_lims.contains(res_grid[0].result.dl.ue_grants.back().pdsch_cfg.rbs.type1())); } @@ -203,9 +206,8 @@ TEST_P(ue_grid_allocator_tester, when_using_non_fallback_dci_format_use_mcs_tabl .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); ASSERT_EQ(res_grid[0].result.dl.ue_grants.back().pdsch_cfg.codewords.back().mcs_table, srsran::pdsch_mcs_table::qam256); } @@ -227,9 +229,8 @@ TEST_P(ue_grid_allocator_tester, allocates_pdsch_restricted_to_recommended_max_n .recommended_nof_bytes = sched_bytes, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE( - run_until([&]() { 
return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id).status; }, + [&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); // Successfully allocates PDSCH corresponding to the grant. ASSERT_GE(find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants)->pdsch_cfg.rbs.type1().length(), grant1.max_nof_rbs); @@ -252,11 +253,8 @@ TEST_P(ue_grid_allocator_tester, allocates_pusch_restricted_to_recommended_max_n .recommended_nof_bytes = recommended_nof_bytes_to_schedule, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)).status == - alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)); }, + [&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); // Successfully allocates PUSCH corresponding to the grant. 
ASSERT_EQ(find_ue_pusch(u1.crnti, res_grid[0].result.ul)->pusch_cfg.rbs.type1().length(), grant1.max_nof_rbs); } @@ -279,11 +277,8 @@ TEST_P(ue_grid_allocator_tester, does_not_allocate_pusch_with_all_remaining_rbs_ const crb_interval cell_crbs = {cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.crbs.start(), cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.crbs.stop()}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)).status == - alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)); }, + [&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); // Successfully allocates PUSCH corresponding to the grant. ASSERT_LT(find_ue_pusch(u1.crnti, res_grid[0].result.ul)->pusch_cfg.rbs.type1().length(), cell_crbs.length()); } @@ -309,11 +304,12 @@ TEST_P(ue_grid_allocator_tester, no_two_pdschs_are_allocated_in_same_slot_for_a_ .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success or - alloc.allocate_dl_grant(grant2, dummy_slice_id).status == alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until( + [&]() { + alloc.allocate_dl_grant(grant1, dummy_slice_id).status; + alloc.allocate_dl_grant(grant2, dummy_slice_id).status; + }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); // Only one PDSCH per slot per UE. 
ASSERT_EQ(res_grid[0].result.dl.ue_grants.size(), 1); @@ -328,8 +324,6 @@ TEST_P(ue_grid_allocator_tester, no_two_puschs_are_allocated_in_same_slot_for_a_ const ue& u = add_ue(ue_creation_req); - slot_point pusch_slot = get_next_ul_slot(current_slot); - // First PUSCH grant for the UE. const ue_pusch_grant grant1{.user = &slice_ues[u.ue_index], .cell_index = to_du_cell_index(0), @@ -342,11 +336,13 @@ TEST_P(ue_grid_allocator_tester, no_two_puschs_are_allocated_in_same_slot_for_a_ .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, pusch_slot).status == alloc_status::success or - alloc.allocate_ul_grant(grant2, dummy_slice_id, pusch_slot).status == alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u.crnti, res_grid[0].result.ul) != nullptr; })); + ASSERT_TRUE(run_until( + [&]() { + slot_point pusch_slot = get_next_ul_slot(current_slot); + alloc.allocate_ul_grant(grant1, dummy_slice_id, pusch_slot); + alloc.allocate_ul_grant(grant2, dummy_slice_id, pusch_slot); + }, + [&]() { return find_ue_pusch(u.crnti, res_grid[0].result.ul) != nullptr; })); // Only one PUSCH per slot per UE. ASSERT_EQ(res_grid[0].result.ul.puschs.size(), 1); @@ -361,19 +357,19 @@ TEST_P(ue_grid_allocator_tester, consecutive_puschs_for_a_ue_are_allocated_in_in const ue& u = add_ue(ue_creation_req); - slot_point pusch_slot = get_next_ul_slot(current_slot); - // First PUSCH grant for the UE. 
const ue_pusch_grant grant1{.user = &slice_ues[u.ue_index], .cell_index = to_du_cell_index(0), .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; + slot_point pusch_slot; ASSERT_TRUE(run_until( - [&]() { return alloc.allocate_ul_grant(grant1, dummy_slice_id, pusch_slot).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u.crnti, res_grid[0].result.ul) != nullptr; })); - - run_slot(); + [&]() { + pusch_slot = get_next_ul_slot(current_slot); + alloc.allocate_ul_grant(grant1, dummy_slice_id, pusch_slot); + }, + [&]() { return find_ue_pusch(u.crnti, res_grid[0].result.ul) != nullptr; })); // Second PUSCH grant for the UE trying to allocate PUSCH in a slot previous to grant1. const ue_pusch_grant grant2{.user = &slice_ues[u.ue_index], @@ -381,9 +377,10 @@ TEST_P(ue_grid_allocator_tester, consecutive_puschs_for_a_ue_are_allocated_in_in .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_FALSE(run_until([&]() { - return alloc.allocate_ul_grant(grant2, dummy_slice_id, pusch_slot - 1).status == alloc_status::success; - })); + ul_alloc_result result = {alloc_status::invalid_params}; + ASSERT_FALSE(run_until([&]() { result = alloc.allocate_ul_grant(grant2, dummy_slice_id, pusch_slot - 1); }, + [&]() { return result.status == alloc_status::success; }, + 1)); } TEST_P(ue_grid_allocator_tester, consecutive_pdschs_for_a_ue_are_allocated_in_increasing_order_of_time) @@ -401,22 +398,18 @@ TEST_P(ue_grid_allocator_tester, consecutive_pdschs_for_a_ue_are_allocated_in_in .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id); }, + [&]() { return 
find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); slot_point last_pdsch_slot = current_slot; - run_slot(); - // Second PDSCH grant in the same slot for the UE. const ue_pdsch_grant grant2{.user = &slice_ues[u.ue_index], .cell_index = to_du_cell_index(0), .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant2, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant2, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); ASSERT_GE(current_slot, last_pdsch_slot); } @@ -436,22 +429,18 @@ TEST_P(ue_grid_allocator_tester, .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); slot_point last_pdsch_ack_slot = current_slot + find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants)->context.k1; - run_slot(); - // Second PDSCH grant in the same slot for the UE. 
const ue_pdsch_grant grant2{.user = &slice_ues[u.ue_index], .cell_index = to_du_cell_index(0), .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant2, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant2, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); ASSERT_GE(current_slot + find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants)->context.k1, last_pdsch_ack_slot); } @@ -467,7 +456,7 @@ TEST_P(ue_grid_allocator_tester, successfully_allocated_pdsch_even_with_large_ga // Ensure current slot is the middle of 1024 SFNs. i.e. current slot=511.0 while (current_slot.sfn() != NOF_SFNS / 2) { - run_slot(); + slot_indication(); } // First PDSCH grant for the UE. @@ -476,14 +465,14 @@ TEST_P(ue_grid_allocator_tester, successfully_allocated_pdsch_even_with_large_ga .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); // Ensure next PDSCH to be allocated slot is after wrap around of 1024 SFNs (large gap to last allocated PDSCH slot) // and current slot value is less than last allocated PDSCH slot. e.g. next PDSCH to be allocated slot=SFN 2, slot 2 // after wrap around of 1024 SFNs. for (unsigned i = 0; i < current_slot.nof_slots_per_system_frame() / 2 + current_slot.nof_slots_per_frame(); ++i) { - run_slot(); + slot_indication(); } // Next PDSCH grant to be allocated. 
@@ -492,9 +481,9 @@ TEST_P(ue_grid_allocator_tester, successfully_allocated_pdsch_even_with_large_ga .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant2, dummy_slice_id).status == alloc_status::success; }, - nof_slot_until_pdsch_is_allocated_threshold)); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant2, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u.crnti, res_grid[0].result.dl.ue_grants) != nullptr; }, + nof_slot_until_pdsch_is_allocated_threshold)); } TEST_P(ue_grid_allocator_tester, successfully_allocated_pusch_even_with_large_gap_to_last_pusch_slot_allocated) @@ -509,7 +498,7 @@ TEST_P(ue_grid_allocator_tester, successfully_allocated_pusch_even_with_large_ga // Ensure current slot is the middle of 1024 SFNs. i.e. current slot=511.0 while (current_slot.sfn() != NOF_SFNS / 2) { - run_slot(); + slot_indication(); } // First PUSCH grant for the UE. @@ -518,16 +507,14 @@ TEST_P(ue_grid_allocator_tester, successfully_allocated_pusch_even_with_large_ga .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = nof_bytes_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)).status == - alloc_status::success; - })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)); }, + [&]() { return find_ue_pusch(u.crnti, res_grid[0].result.ul.puschs) != nullptr; })); // Ensure next PUSCH to be allocated slot is after wrap around of 1024 SFNs (large gap to last allocated PUSCH slot) // and current slot value is less than last allocated PUSCH slot. e.g. next PUSCH to be allocated slot=SFN 2, slot 2 // after wrap around of 1024 SFNs. for (unsigned i = 0; i < current_slot.nof_slots_per_system_frame() / 2 + current_slot.nof_slots_per_frame(); ++i) { - run_slot(); + slot_indication(); } // Second PUSCH grant for the UE. 
@@ -541,6 +528,7 @@ TEST_P(ue_grid_allocator_tester, successfully_allocated_pusch_even_with_large_ga return alloc.allocate_ul_grant(grant2, dummy_slice_id, get_next_ul_slot(current_slot)).status == alloc_status::success; }, + [&]() { return find_ue_pusch(u.crnti, res_grid[0].result.ul.puschs) != nullptr; }, nof_slot_until_pusch_is_allocated_threshold)); } @@ -575,26 +563,29 @@ TEST_P(ue_grid_allocator_remaining_rbs_alloc_tester, remaining_dl_rbs_are_alloca .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = sched_bytes}; - // Since UE dedicated SearchSpace is a UE specific SearchSpace (Not CSS). Entire BWP CRBs can be used for allocation. + // Since UE dedicated SearchSpace is a UE specific SearchSpace (Not CSS). Entire BWP CRBs can be used for + // allocation. const unsigned total_crbs = cell_cfg.dl_cfg_common.init_dl_bwp.generic_params.crbs.length(); const ue_pdsch_grant grant2{.user = &slice_ues[u2.ue_index], .cell_index = to_du_cell_index(0), .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = sched_bytes}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success and - alloc.allocate_dl_grant(grant2, dummy_slice_id).status == alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { - return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr and - find_ue_pdsch(u2.crnti, res_grid[0].result.dl.ue_grants) != nullptr; - })); + ASSERT_TRUE(run_until( + [&]() { + alloc.allocate_dl_grant(grant1, dummy_slice_id); + alloc.allocate_dl_grant(grant2, dummy_slice_id); + }, + [&]() { + return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr and + find_ue_pdsch(u2.crnti, res_grid[0].result.dl.ue_grants) != nullptr; + })); // Successfully allocates PDSCH corresponding to the grant. ASSERT_GE(find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants)->pdsch_cfg.codewords.back().tb_size_bytes, sched_bytes); - // Since UE dedicated SearchSpace is a UE specific SearchSpace (Not CSS). 
Entire BWP CRBs can be used for allocation. + // Since UE dedicated SearchSpace is a UE specific SearchSpace (Not CSS). Entire BWP CRBs can be used for + // allocation. const unsigned crbs_allocated = find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants)->pdsch_cfg.rbs.type1().length(); @@ -616,9 +607,8 @@ TEST_P(ue_grid_allocator_remaining_rbs_alloc_tester, remaining_ul_rbs_are_alloca const unsigned recommended_nof_bytes_to_schedule = 200U; - const crb_interval cell_crbs = {cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.crbs.start(), - cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.crbs.stop()}; - slot_point pusch_to_alloc_slot = get_next_ul_slot(current_slot); + const crb_interval cell_crbs = {cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.crbs.start(), + cell_cfg.ul_cfg_common.init_ul_bwp.generic_params.crbs.stop()}; const ue_pusch_grant grant1{.user = &slice_ues[u1.ue_index], .cell_index = to_du_cell_index(0), .h_id = INVALID_HARQ_ID, @@ -627,14 +617,20 @@ TEST_P(ue_grid_allocator_remaining_rbs_alloc_tester, remaining_ul_rbs_are_alloca .cell_index = to_du_cell_index(0), .h_id = INVALID_HARQ_ID, .recommended_nof_bytes = recommended_nof_bytes_to_schedule}; + // The UE needs to have pending data for its grant to be extended. 
+ ues[u2.ue_index].handle_bsr_indication(ul_bsr_indication_message{ + to_du_cell_index(0), u2.ue_index, u2.crnti, bsr_format::SHORT_BSR, {ul_bsr_lcg_report{lcg_id_t{0}, 3000}}}); - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, pusch_to_alloc_slot).status == alloc_status::success and - alloc.allocate_ul_grant(grant2, dummy_slice_id, pusch_to_alloc_slot).status == alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { - return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr and find_ue_pusch(u2.crnti, res_grid[0].result.ul); - })); + ASSERT_TRUE(run_until( + [&]() { + slot_point pusch_to_alloc_slot = get_next_ul_slot(current_slot); + alloc.allocate_ul_grant(grant1, dummy_slice_id, pusch_to_alloc_slot); + alloc.allocate_ul_grant(grant2, dummy_slice_id, pusch_to_alloc_slot); + }, + [&]() { + return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr and + find_ue_pusch(u2.crnti, res_grid[0].result.ul) != nullptr; + })); // Successfully allocates PUSCH corresponding to the grant. ASSERT_GE(find_ue_pusch(u1.crnti, res_grid[0].result.ul)->pusch_cfg.tb_size_bytes, grant1.recommended_nof_bytes); @@ -678,9 +674,8 @@ TEST_P(ue_grid_allocator_expert_cfg_pxsch_nof_rbs_limits_tester, .recommended_nof_bytes = sched_bytes, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); // Successfully allocates PDSCH. 
ASSERT_EQ(find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants)->pdsch_cfg.rbs.type1().length(), std::max(expert_cfg.pdsch_nof_rbs.start(), max_nof_rbs_to_schedule)); @@ -705,9 +700,8 @@ TEST_P(ue_grid_allocator_expert_cfg_pxsch_nof_rbs_limits_tester, .recommended_nof_bytes = sched_bytes, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); // Successfully allocates PDSCH. ASSERT_EQ(find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants)->pdsch_cfg.rbs.type1().length(), std::min(expert_cfg.pdsch_nof_rbs.stop(), max_nof_rbs_to_schedule)); @@ -732,11 +726,8 @@ TEST_P(ue_grid_allocator_expert_cfg_pxsch_nof_rbs_limits_tester, .recommended_nof_bytes = recommended_nof_bytes_to_schedule, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)).status == - alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)); }, + [&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); // Successfully allocates PUSCH. 
ASSERT_EQ(find_ue_pusch(u1.crnti, res_grid[0].result.ul)->pusch_cfg.rbs.type1().length(), std::max(expert_cfg.pdsch_nof_rbs.start(), max_nof_rbs_to_schedule)); @@ -761,11 +752,8 @@ TEST_P(ue_grid_allocator_expert_cfg_pxsch_nof_rbs_limits_tester, .recommended_nof_bytes = recommended_nof_bytes_to_schedule, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)).status == - alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)); }, + [&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); // Successfully allocates PUSCH. ASSERT_EQ(find_ue_pusch(u1.crnti, res_grid[0].result.ul)->pusch_cfg.rbs.type1().length(), std::min(expert_cfg.pdsch_nof_rbs.stop(), max_nof_rbs_to_schedule)); @@ -813,9 +801,8 @@ TEST_P(ue_grid_allocator_expert_cfg_pxsch_crb_limits_tester, allocates_pdsch_wit .recommended_nof_bytes = sched_bytes, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE( - run_until([&]() { return alloc.allocate_dl_grant(grant1, dummy_slice_id).status == alloc_status::success; })); - ASSERT_TRUE(run_until([&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_dl_grant(grant1, dummy_slice_id); }, + [&]() { return find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants) != nullptr; })); // Successfully allocates PDSCH within RB limits. 
ASSERT_EQ(find_ue_pdsch(u1.crnti, res_grid[0].result.dl.ue_grants)->pdsch_cfg.rbs.type1(), pdsch_vrb_limits); } @@ -838,11 +825,8 @@ TEST_P(ue_grid_allocator_expert_cfg_pxsch_crb_limits_tester, allocates_pusch_wit .recommended_nof_bytes = recommended_nof_bytes_to_schedule, .max_nof_rbs = max_nof_rbs_to_schedule}; - ASSERT_TRUE(run_until([&]() { - return alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)).status == - alloc_status::success; - })); - ASSERT_TRUE(run_until([&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); + ASSERT_TRUE(run_until([&]() { alloc.allocate_ul_grant(grant1, dummy_slice_id, get_next_ul_slot(current_slot)); }, + [&]() { return find_ue_pusch(u1.crnti, res_grid[0].result.ul) != nullptr; })); // Successfully allocates PUSCH within RB limits. ASSERT_EQ(find_ue_pusch(u1.crnti, res_grid[0].result.ul)->pusch_cfg.rbs.type1(), pusch_vrb_limits); }