Merge tag 'kvmarm-fixes-6.13-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 changes for 6.13, part #2

 - Constrain invalidations from GICR_INVLPIR to only affect the LPI
   INTID space

 - Set of robustness improvements to the management of vgic irqs and GIC
   ITS table entries

 - Fix a compilation issue with CONFIG_CC_OPTIMIZE_FOR_SIZE=y where
   set_sysreg_masks() wasn't getting inlined, breaking the check for a
   constant sysreg index

 - Correct KVM's vPMU overflow condition to match the architecture for
   hyp and non-hyp counters (a standalone sketch of the corrected
   condition follows this list)
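As a reading aid for the last bullet, here is a minimal standalone sketch of the corrected overflow condition, mirroring the new kvm_pmu_overflow_status() in the pmu-emul.c diff below. The function and parameter names are hypothetical, and plain integers stand in for the trapped sysregs:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical userspace sketch, not kernel code: a counter contributes
 * to the overflow interrupt when its overflow flag (PMOVSSET_EL0[n]),
 * its interrupt enable (PMINTENSET_EL1[n]), and its *own* global enable
 * (PMCR_EL0.E for guest counters, MDCR_EL2.HPME for hyp counters) are
 * all set.
 */
static bool pmu_overflow_pending(uint64_t pmovsset, uint64_t pmintenset,
				 uint64_t hyp_mask, bool pmcr_e, bool hpme)
{
	uint64_t reg = pmovsset & pmintenset;

	if (!pmcr_e)		/* PMCR_EL0.E clear: drop non-hyp counters */
		reg &= hyp_mask;
	if (!hpme)		/* MDCR_EL2.HPME clear: drop hyp counters */
		reg &= ~hyp_mask;

	return reg;
}

The old code gated the whole computation on PMCR_EL0.E alone, so counters reserved for EL2 were wrongly masked by the guest's global enable.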
bonzini committed Nov 21, 2024
2 parents 9ee62c3 + 13905f4 commit 1508bae
Showing 14 changed files with 172 additions and 118 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/kvm/nested.c
@@ -951,7 +951,7 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
return v;
}

static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
{
int i = sr - __SANITISED_REG_START__;

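For the set_sysreg_masks() change above, here is a minimal userspace analogue of why the forced inlining matters under CONFIG_CC_OPTIMIZE_FOR_SIZE=y. It uses GCC/Clang builtins and is purely illustrative; the kernel's actual constant-index check differs:

#include <stdio.h>

/*
 * Under -Os the compiler may keep a plain "static inline" function out
 * of line; __builtin_constant_p() then sees a runtime parameter rather
 * than the constant the caller passed, and a build-time check on the
 * index fails. __always_inline propagates the constant into the body.
 */
static inline __attribute__((__always_inline__))
int index_is_constant(int sr)
{
	return __builtin_constant_p(sr);
}

int main(void)
{
	/* With optimization enabled this prints 1: the literal folds. */
	printf("constant-folded: %d\n", index_is_constant(42));
	return 0;
}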
62 changes: 45 additions & 17 deletions arch/arm64/kvm/pmu-emul.c
@@ -274,12 +274,23 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
{
unsigned int hpmn;
unsigned int hpmn, n;

if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
return false;
if (!vcpu_has_nv(vcpu))
return 0;

hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
n = vcpu->kvm->arch.pmcr_n;

/*
* Programming HPMN to a value greater than PMCR_EL0.N is
* CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
* UNKNOWN number of counters (in our case, zero) are reserved for EL2.
*/
if (hpmn >= n)
return 0;

/*
* Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
@@ -288,20 +299,22 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
* implementation choice that all counters are included in the second
* range reserved for EL2/EL3.
*/
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
return idx >= hpmn;
return GENMASK(n - 1, hpmn);
}

bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
}

u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
u64 hpmn;

if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
return mask;

hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
}

u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
@@ -375,15 +388,30 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
/*
* Returns the PMU overflow state, which is true if there exists an event
* counter where the values of the global enable control, PMOVSSET_EL0[n], and
* PMINTENSET_EL1[n] are all 1.
*/
static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = 0;
u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);

if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
}
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);

/*
* PMCR_EL0.E is the global enable control for event counters available
* to EL0 and EL1.
*/
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
reg &= kvm_pmu_hyp_counter_mask(vcpu);

/*
* Otherwise, MDCR_EL2.HPME is the global enable control for event
* counters reserved for EL2.
*/
if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
reg &= ~kvm_pmu_hyp_counter_mask(vcpu);

return reg;
}
@@ -396,7 +424,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
if (!kvm_vcpu_has_pmu(vcpu))
return;

overflow = !!kvm_pmu_overflow_status(vcpu);
overflow = kvm_pmu_overflow_status(vcpu);
if (pmu->irq_level == overflow)
return;

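The refactor above centralizes the EL2 counter range in kvm_pmu_hyp_counter_mask(). A standalone illustration of the partitioning, with hypothetical register values and GENMASK_ULL reimplemented (behaviorally equivalent to the kernel's) so the snippet is self-contained:

#include <stdint.h>
#include <stdio.h>

/* Bits l..h set, equivalent to the kernel's GENMASK_ULL(). */
#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	unsigned int n = 8;	/* hypothetical PMCR_EL0.N: 8 event counters */
	unsigned int hpmn = 5;	/* hypothetical MDCR_EL2.HPMN */

	/* HPMN >= N is CONSTRAINED UNPREDICTABLE; reserve no counters. */
	uint64_t hyp_mask = (hpmn >= n) ? 0 : GENMASK_ULL(n - 1, hpmn);

	/* Prints 0xe0: counters 5..7 are the EL2 range, 0..4 the guest's. */
	printf("hyp counter mask: %#llx\n", (unsigned long long)hyp_mask);
	return 0;
}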
5 changes: 4 additions & 1 deletion arch/arm64/kvm/vgic/vgic-debug.c
@@ -287,7 +287,10 @@ static int vgic_debug_show(struct seq_file *s, void *v)
* Expect this to succeed, as iter_mark_lpis() takes a reference on
* every LPI to be visited.
*/
irq = vgic_get_irq(kvm, vcpu, iter->intid);
if (iter->intid < VGIC_NR_PRIVATE_IRQS)
irq = vgic_get_vcpu_irq(vcpu, iter->intid);
else
irq = vgic_get_irq(kvm, iter->intid);
if (WARN_ON_ONCE(!irq))
return -EINVAL;

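The hunk above reflects an API split in which vgic_get_irq() no longer takes a vCPU: INTIDs below VGIC_NR_PRIVATE_IRQS (the 16 SGIs plus 16 PPIs) are looked up per vCPU via vgic_get_vcpu_irq(), everything else via the distributor. A self-contained model of just the routing decision (all names besides VGIC_NR_PRIVATE_IRQS are illustrative):

#include <stdint.h>
#include <stdio.h>

#define VGIC_NR_PRIVATE_IRQS 32	/* SGIs 0-15 + PPIs 16-31 */

enum irq_scope { SCOPE_VCPU, SCOPE_DIST };

/* Private interrupts are banked per CPU; SPIs/LPIs are global. */
static enum irq_scope intid_scope(uint32_t intid)
{
	return intid < VGIC_NR_PRIVATE_IRQS ? SCOPE_VCPU : SCOPE_DIST;
}

int main(void)
{
	printf("intid 27: %s\n",
	       intid_scope(27) == SCOPE_VCPU ? "per-vCPU" : "distributor");
	printf("intid 40: %s\n",
	       intid_scope(40) == SCOPE_VCPU ? "per-vCPU" : "distributor");
	return 0;
}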
2 changes: 1 addition & 1 deletion arch/arm64/kvm/vgic/vgic-init.c
@@ -322,7 +322,7 @@ int vgic_init(struct kvm *kvm)
goto out;

for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);

switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
77 changes: 54 additions & 23 deletions arch/arm64/kvm/vgic/vgic-its.c
@@ -31,6 +31,41 @@ static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
struct kvm_vcpu *filter_vcpu, bool needs_inv);

#define vgic_its_read_entry_lock(i, g, valp, t) \
({ \
int __sz = vgic_its_get_abi(i)->t##_esz; \
struct kvm *__k = (i)->dev->kvm; \
int __ret; \
\
BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
sizeof(*(valp)) != ABI_0_ESZ); \
if (NR_ITS_ABIS > 1 && \
KVM_BUG_ON(__sz != sizeof(*(valp)), __k)) \
__ret = -EINVAL; \
else \
__ret = kvm_read_guest_lock(__k, (g), \
valp, __sz); \
__ret; \
})

#define vgic_its_write_entry_lock(i, g, val, t) \
({ \
int __sz = vgic_its_get_abi(i)->t##_esz; \
struct kvm *__k = (i)->dev->kvm; \
typeof(val) __v = (val); \
int __ret; \
\
BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
sizeof(__v) != ABI_0_ESZ); \
if (NR_ITS_ABIS > 1 && \
KVM_BUG_ON(__sz != sizeof(__v), __k)) \
__ret = -EINVAL; \
else \
__ret = vgic_write_guest_lock(__k, (g), \
&__v, __sz); \
__ret; \
})

/*
* Creates a new (reference to a) struct vgic_irq for a given LPI.
* If this LPI is already mapped on another ITS, we increase its refcount
@@ -42,7 +77,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
struct kvm_vcpu *vcpu)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
unsigned long flags;
int ret;

@@ -419,7 +454,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
last_byte_offset = byte_offset;
}

irq = vgic_get_irq(vcpu->kvm, NULL, intid);
irq = vgic_get_irq(vcpu->kvm, intid);
if (!irq)
continue;

@@ -794,7 +829,7 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,

its_free_ite(kvm, ite);

return vgic_its_write_entry_lock(its, gpa, 0, ite_esz);
return vgic_its_write_entry_lock(its, gpa, 0ULL, ite);
}

return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
@@ -1143,7 +1178,6 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
bool valid = its_cmd_get_validbit(its_cmd);
u8 num_eventid_bits = its_cmd_get_size(its_cmd);
gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
int dte_esz = vgic_its_get_abi(its)->dte_esz;
struct its_device *device;
gpa_t gpa;

@@ -1168,7 +1202,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
* is an error, so we are done in any case.
*/
if (!valid)
return vgic_its_write_entry_lock(its, gpa, 0, dte_esz);
return vgic_its_write_entry_lock(its, gpa, 0ULL, dte);

device = vgic_its_alloc_device(its, device_id, itt_addr,
num_eventid_bits);
@@ -1288,7 +1322,7 @@ int vgic_its_invall(struct kvm_vcpu *vcpu)
unsigned long intid;

xa_for_each(&dist->lpi_xa, intid, irq) {
irq = vgic_get_irq(kvm, NULL, intid);
irq = vgic_get_irq(kvm, intid);
if (!irq)
continue;

@@ -1354,7 +1388,7 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
return 0;

xa_for_each(&dist->lpi_xa, intid, irq) {
irq = vgic_get_irq(kvm, NULL, intid);
irq = vgic_get_irq(kvm, intid);
if (!irq)
continue;

@@ -2090,7 +2124,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
* vgic_its_save_ite - Save an interrupt translation entry at @gpa
*/
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
struct its_ite *ite, gpa_t gpa, int ite_esz)
struct its_ite *ite, gpa_t gpa)
{
u32 next_offset;
u64 val;
@@ -2101,7 +2135,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
ite->collection->collection_id;
val = cpu_to_le64(val);

return vgic_its_write_entry_lock(its, gpa, val, ite_esz);
return vgic_its_write_entry_lock(its, gpa, val, ite);
}

/**
@@ -2201,7 +2235,7 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
return -EACCES;

ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
ret = vgic_its_save_ite(its, device, ite, gpa);
if (ret)
return ret;
}
@@ -2240,10 +2274,9 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
* @its: ITS handle
* @dev: ITS device
* @ptr: GPA
* @dte_esz: device table entry size
*/
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
gpa_t ptr, int dte_esz)
gpa_t ptr)
{
u64 val, itt_addr_field;
u32 next_offset;
@@ -2256,7 +2289,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);

return vgic_its_write_entry_lock(its, ptr, val, dte_esz);
return vgic_its_write_entry_lock(its, ptr, val, dte);
}

/**
@@ -2332,10 +2365,8 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
*/
static int vgic_its_save_device_tables(struct vgic_its *its)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
u64 baser = its->baser_device_table;
struct its_device *dev;
int dte_esz = abi->dte_esz;

if (!(baser & GITS_BASER_VALID))
return 0;
@@ -2354,7 +2385,7 @@ static int vgic_its_save_device_tables(struct vgic_its *its)
if (ret)
return ret;

ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
ret = vgic_its_save_dte(its, dev, eaddr);
if (ret)
return ret;
}
@@ -2435,7 +2466,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)

static int vgic_its_save_cte(struct vgic_its *its,
struct its_collection *collection,
gpa_t gpa, int esz)
gpa_t gpa)
{
u64 val;

@@ -2444,23 +2475,23 @@ static int vgic_its_save_cte(struct vgic_its *its,
collection->collection_id);
val = cpu_to_le64(val);

return vgic_its_write_entry_lock(its, gpa, val, esz);
return vgic_its_write_entry_lock(its, gpa, val, cte);
}

/*
* Restore a collection entry into the ITS collection table.
* Return +1 on success, 0 if the entry was invalid (which should be
* interpreted as end-of-table), and a negative error value for generic errors.
*/
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa)
{
struct its_collection *collection;
struct kvm *kvm = its->dev->kvm;
u32 target_addr, coll_id;
u64 val;
int ret;

ret = vgic_its_read_entry_lock(its, gpa, &val, esz);
ret = vgic_its_read_entry_lock(its, gpa, &val, cte);
if (ret)
return ret;
val = le64_to_cpu(val);
@@ -2507,7 +2538,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

list_for_each_entry(collection, &its->collection_list, coll_list) {
ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
ret = vgic_its_save_cte(its, collection, gpa);
if (ret)
return ret;
gpa += cte_esz;
Expand All @@ -2521,7 +2552,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
* table is not fully filled, add a last dummy element
* with valid bit unset
*/
return vgic_its_write_entry_lock(its, gpa, 0, cte_esz);
return vgic_its_write_entry_lock(its, gpa, 0ULL, cte);
}

/*
Expand All @@ -2546,7 +2577,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

while (read < max_size) {
ret = vgic_its_restore_cte(its, gpa, cte_esz);
ret = vgic_its_restore_cte(its, gpa);
if (ret <= 0)
break;
gpa += cte_esz;
(diffs for the remaining changed files not shown)
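Finally, the vgic_its_read_entry_lock()/vgic_its_write_entry_lock() macros added in vgic-its.c derive the entry size from the ITS ABI by token-pasting the table type (ite, dte, cte) onto _esz, and size-check it against the caller's value. That is why the call sites above now pass a typed 0ULL and no longer thread *_esz parameters around. A stripped-down, runnable illustration of the token-pasting pattern (the struct and macro names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the ITS ABI descriptor. */
struct its_abi {
	int ite_esz, dte_esz, cte_esz;
};

/*
 * t##_esz pastes the table type onto _esz, so the entry size always
 * comes from the ABI; sizeof(val) can then be checked against it,
 * which requires callers to pass a value of the right width (0ULL).
 */
#define abi_entry_size(abi, t)		((abi)->t##_esz)
#define entry_size_ok(abi, t, val)	(abi_entry_size(abi, t) == (int)sizeof(val))

int main(void)
{
	struct its_abi abi = { .ite_esz = 8, .dte_esz = 8, .cte_esz = 8 };

	printf("dte size: %d, matches a u64 write: %d\n",
	       abi_entry_size(&abi, dte), entry_size_ok(&abi, dte, 0ULL));
	return 0;
}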