diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 416ca114ead24c..39b34d1221b489 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -8,9 +8,11 @@
 #include
 #include
+#include <drm/drm_exec.h>
 #include
 #include
 #include
+#include <drm/drm_gpuvm.h>
 #include
 #include
 #include
@@ -64,6 +66,8 @@ const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
 const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
 const fop_flags_t RUST_CONST_HELPER_FOP_UNSIGNED_OFFSET = FOP_UNSIGNED_OFFSET;
 
+const uint32_t BINDINGS_DRM_EXEC_INTERRUPTIBLE_WAIT = DRM_EXEC_INTERRUPTIBLE_WAIT;
+
 const gfp_t BINDINGS_XA_FLAGS_LOCK_IRQ = XA_FLAGS_LOCK_IRQ;
 const gfp_t BINDINGS_XA_FLAGS_LOCK_BH = XA_FLAGS_LOCK_BH;
 const gfp_t BINDINGS_XA_FLAGS_TRACK_FREE = XA_FLAGS_TRACK_FREE;
diff --git a/rust/helpers/drm_gpuvm.c b/rust/helpers/drm_gpuvm.c
new file mode 100644
index 00000000000000..f4f4ea2c4ec897
--- /dev/null
+++ b/rust/helpers/drm_gpuvm.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_gpuvm.h>
+
+#ifdef CONFIG_DRM
+#ifdef CONFIG_DRM_GPUVM
+
+struct drm_gpuvm *rust_helper_drm_gpuvm_get(struct drm_gpuvm *obj)
+{
+	return drm_gpuvm_get(obj);
+}
+
+void rust_helper_drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+	drm_gpuvm_exec_unlock(vm_exec);
+}
+
+void rust_helper_drm_gpuva_init_from_op(struct drm_gpuva *va, struct drm_gpuva_op_map *op)
+{
+	drm_gpuva_init_from_op(va, op);
+}
+
+struct drm_gpuvm_bo *rust_helper_drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+	return drm_gpuvm_bo_get(vm_bo);
+}
+
+bool rust_helper_drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
+{
+	return drm_gpuvm_is_extobj(gpuvm, obj);
+}
+
+#endif
+#endif
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 387552d4346739..2dcf12b6be3719 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -16,6 +16,7 @@
 #include "dma-mapping.c"
 #include "dma-resv.c"
 #include "drm.c"
+#include "drm_gpuvm.c"
 #include "drm_syncobj.c"
 #include "err.c"
 #include "io.c"
diff --git a/rust/kernel/drm/gpuvm.rs b/rust/kernel/drm/gpuvm.rs
new file mode 100644
index 00000000000000..47fd4fbc17ebe8
--- /dev/null
+++ b/rust/kernel/drm/gpuvm.rs
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM GPUVM (GPU virtual memory) abstraction.
+//!
+//! C header: [`include/drm/drm_gpuvm.h`](../../../../include/drm/drm_gpuvm.h)
+
+#![allow(missing_docs)]
+
+use crate::{
+    alloc::flags::*,
+    bindings,
+    drm::{device, drv},
+    error::{
+        code::{EINVAL, ENOMEM},
+        from_result, to_result, Result,
+    },
+    init,
+    prelude::*,
+    types::{ARef, AlwaysRefCounted, Opaque},
+};
+
+use crate::drm::gem::IntoGEMObject;
+use core::cell::UnsafeCell;
+use core::marker::{PhantomData, PhantomPinned};
+use core::mem::ManuallyDrop;
+use core::ops::{Deref, DerefMut, Range};
+use core::ptr::NonNull;
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVm (a GPU address space).
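+///
+/// # Examples
+///
+/// A minimal sketch of a driver-side implementation. `MyDriver` and `MyCtx`
+/// are hypothetical driver types, shown here only to illustrate the shape of
+/// the trait; they are not part of this patch:
+///
+/// ```ignore
+/// struct MyGpuVm;
+///
+/// impl DriverGpuVm for MyGpuVm {
+///     type Driver = MyDriver;
+///     type StepContext = MyCtx;
+///
+///     fn step_map(
+///         self: &mut UpdatingGpuVm<'_, Self>,
+///         op: &mut OpMap<Self>,
+///         ctx: &mut MyCtx,
+///     ) -> Result {
+///         // Program the page tables for [op.addr(), op.addr() + op.range()).
+///         Ok(())
+///     }
+///
+///     // step_unmap() and step_remap() tear down and split mappings analogously.
+/// }
+/// ```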
+pub trait DriverGpuVm: Sized {
+    /// The parent `Driver` implementation for this `DriverGpuVm`.
+    type Driver: drv::Driver;
+    type GpuVa: DriverGpuVa = ();
+    type GpuVmBo: DriverGpuVmBo = ();
+    type StepContext = ();
+
+    fn step_map(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+    fn step_unmap(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpUnMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+    fn step_remap(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpReMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+}
+
+struct StepContext<'a, T: DriverGpuVm> {
+    gpuvm: &'a GpuVm<T>,
+    ctx: &'a mut T::StepContext,
+}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVa (a mapping in GPU address space).
+pub trait DriverGpuVa: Sized {}
+
+impl DriverGpuVa for () {}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVmBo (a connection between a BO and a VM).
+pub trait DriverGpuVmBo: Sized {
+    fn new() -> impl PinInit<Self>;
+}
+
+/// Provide a default implementation for trivial types
+impl<T: Default> DriverGpuVmBo for T {
+    fn new() -> impl PinInit<Self> {
+        init::default()
+    }
+}
+
+#[repr(transparent)]
+pub struct OpMap<T: DriverGpuVm>(bindings::drm_gpuva_op_map, PhantomData<T>);
+#[repr(transparent)]
+pub struct OpUnMap<T: DriverGpuVm>(bindings::drm_gpuva_op_unmap, PhantomData<T>);
+#[repr(transparent)]
+pub struct OpReMap<T: DriverGpuVm>(bindings::drm_gpuva_op_remap, PhantomData<T>);
+
+impl<T: DriverGpuVm> OpMap<T> {
+    pub fn addr(&self) -> u64 {
+        self.0.va.addr
+    }
+    pub fn range(&self) -> u64 {
+        self.0.va.range
+    }
+    pub fn offset(&self) -> u64 {
+        self.0.gem.offset
+    }
+    pub fn object(&self) -> &<T::Driver as drv::Driver>::Object {
+        // SAFETY: The GEM object is only ever passed as a Driver object below, so
+        // the type must be correct.
+        let p = unsafe {
+            <<T::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(self.0.gem.obj)
+        };
+        // SAFETY: The GEM object has an active reference for the lifetime of this op
+        unsafe { &*p }
+    }
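+
+    /// Inserts the new mapping into the VM and links it to the given VM/BO
+    /// connection, consuming the [`GpuVa`]. If the insert fails (the requested
+    /// range already contains a mapping), ownership of the [`GpuVa`] is handed
+    /// back to the caller in the `Err` variant.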
+    pub fn map_and_link_va(
+        &mut self,
+        gpuvm: &mut UpdatingGpuVm<'_, T>,
+        gpuva: Pin<Box<GpuVa<T>>>,
+        gpuvmbo: &ARef<GpuVmBo<T>>,
+    ) -> Result<(), Pin<Box<GpuVa<T>>>> {
+        // SAFETY: We are handing off the GpuVa ownership and it will not be moved.
+        let p = Box::leak(unsafe { Pin::into_inner_unchecked(gpuva) });
+        // SAFETY: These C functions are called with the correct invariants
+        unsafe {
+            bindings::drm_gpuva_init_from_op(&mut p.gpuva, &mut self.0);
+            if bindings::drm_gpuva_insert(gpuvm.0.gpuvm() as *mut _, &mut p.gpuva) != 0 {
+                // EEXIST, return the GpuVa to the caller as an error
+                return Err(Pin::new_unchecked(Box::from_raw(p)));
+            };
+            // SAFETY: This takes a new reference to the gpuvmbo.
+            bindings::drm_gpuva_link(&mut p.gpuva, &gpuvmbo.bo as *const _ as *mut _);
+        }
+        Ok(())
+    }
+}
+
+impl<T: DriverGpuVm> OpUnMap<T> {
+    pub fn va(&self) -> Option<&GpuVa<T>> {
+        if self.0.va.is_null() {
+            return None;
+        }
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        Some(unsafe { &*p })
+    }
+    pub fn unmap_and_unlink_va(&mut self) -> Option<Pin<Box<GpuVa<T>>>> {
+        if self.0.va.is_null() {
+            return None;
+        }
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        unsafe {
+            bindings::drm_gpuva_unmap(&mut self.0);
+            bindings::drm_gpuva_unlink(self.0.va);
+        }
+
+        // Unlinking/unmapping relinquishes ownership of the GpuVa object,
+        // so clear the pointer
+        self.0.va = core::ptr::null_mut();
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        Some(unsafe { Pin::new_unchecked(Box::from_raw(p)) })
+    }
+}
+
+impl<T: DriverGpuVm> OpReMap<T> {
+    pub fn prev_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The prev pointer must be valid if not-NULL per the op_remap contract
+        unsafe { (self.0.prev as *mut OpMap<T>).as_mut() }
+    }
+    pub fn next_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The next pointer must be valid if not-NULL per the op_remap contract
+        unsafe { (self.0.next as *mut OpMap<T>).as_mut() }
+    }
+    pub fn unmap(&mut self) -> &mut OpUnMap<T> {
+        // SAFETY: The unmap pointer is always valid per the op_remap contract
+        unsafe { (self.0.unmap as *mut OpUnMap<T>).as_mut().unwrap() }
+    }
+}
+
+/// A base GPU VA.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVa<T: DriverGpuVm> {
+    #[pin]
+    gpuva: bindings::drm_gpuva,
+    #[pin]
+    inner: T::GpuVa,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+// SAFETY: This type is safe to zero-init (as far as C is concerned).
+unsafe impl init::Zeroable for bindings::drm_gpuva {}
+
+impl<T: DriverGpuVm> GpuVa<T> {
+    pub fn new<E>(inner: impl PinInit<T::GpuVa, E>) -> Result<Pin<Box<Self>>>
+    where
+        Error: From<E>,
+    {
+        Box::try_pin_init(
+            try_pin_init!(Self {
+                gpuva <- init::zeroed(),
+                inner <- inner,
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )
+    }
+
+    pub fn addr(&self) -> u64 {
+        self.gpuva.va.addr
+    }
+    pub fn range(&self) -> u64 {
+        self.gpuva.va.range
+    }
+    pub fn vm_bo(&self) -> ARef<GpuVmBo<T>> {
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p =
+            unsafe { crate::container_of!(self.gpuva.vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+
+        // SAFETY: We incref and wrap in an ARef, so the reference count is consistent
+        unsafe {
+            bindings::drm_gpuvm_bo_get(self.gpuva.vm_bo);
+            ARef::from_raw(NonNull::new_unchecked(p))
+        }
+    }
+    pub fn offset(&self) -> u64 {
+        self.gpuva.gem.offset
+    }
+}
+
+/// A base GpuVm BO.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVmBo<T: DriverGpuVm> {
+    #[pin]
+    bo: bindings::drm_gpuvm_bo,
+    #[pin]
+    inner: T::GpuVmBo,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+impl<T: DriverGpuVm> GpuVmBo<T> {
+    /// Return a reference to the inner driver data for this GpuVmBo
+    pub fn inner(&self) -> &T::GpuVmBo {
+        &self.inner
+    }
+}
+
+// SAFETY: DRM GpuVmBo objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVmBo<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_bo_get function satisfies the requirements for inc_ref().
+        unsafe { bindings::drm_gpuvm_bo_get(&self.bo as *const _ as *mut _) };
+    }
+
+    unsafe fn dec_ref(mut obj: NonNull<Self>) {
+        // SAFETY: drm_gpuvm_bo_put() requires holding the gpuva lock, which is the dma_resv lock by default.
+        // The drm_gpuvm_bo_put function satisfies the requirements for dec_ref().
+        // (We do not support custom locks yet.)
+        unsafe {
+            let resv = (*obj.as_mut().bo.obj).resv;
+            bindings::dma_resv_lock(resv, core::ptr::null_mut());
+            bindings::drm_gpuvm_bo_put(&mut obj.as_mut().bo);
+            bindings::dma_resv_unlock(resv);
+        }
+    }
+}
+
+/// A base GPU VM.
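+///
+/// This is the Rust owner of a `struct drm_gpuvm` and of the `T` driver state
+/// embedded in it. It is always reference counted, and the driver state is
+/// only reachable through [`LockedGpuVm`] or [`UpdatingGpuVm`], which imply
+/// that the appropriate locks are held.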
+#[repr(C)]
+#[pin_data]
+pub struct GpuVm<T: DriverGpuVm> {
+    #[pin]
+    gpuvm: Opaque<bindings::drm_gpuvm>,
+    #[pin]
+    inner: UnsafeCell<T>,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+pub(super) unsafe extern "C" fn vm_free_callback<T: DriverGpuVm>(
+    raw_gpuvm: *mut bindings::drm_gpuvm,
+) {
+    // SAFETY: Container invariant is guaranteed for objects using our callback.
+    let p = unsafe {
+        crate::container_of!(
+            raw_gpuvm as *mut Opaque<bindings::drm_gpuvm>,
+            GpuVm<T>,
+            gpuvm
+        ) as *mut GpuVm<T>
+    };
+
+    // SAFETY: p is guaranteed to be valid for drm_gpuvm objects using this callback.
+    unsafe { drop(Box::from_raw(p)) };
+}
+
+pub(super) unsafe extern "C" fn vm_bo_alloc_callback<T: DriverGpuVm>() -> *mut bindings::drm_gpuvm_bo
+{
+    let obj: Result<Pin<Box<GpuVmBo<T>>>> = Box::try_pin_init(
+        try_pin_init!(GpuVmBo::<T> {
+            bo <- init::default(),
+            inner <- T::GpuVmBo::new(),
+            _p: PhantomPinned
+        }),
+        GFP_KERNEL,
+    );
+
+    match obj {
+        Ok(obj) =>
+        // SAFETY: The DRM core will keep this object pinned
+        unsafe {
+            let p = Box::leak(Pin::into_inner_unchecked(obj));
+            &mut p.bo
+        },
+        Err(_) => core::ptr::null_mut(),
+    }
+}
+
+pub(super) unsafe extern "C" fn vm_bo_free_callback<T: DriverGpuVm>(
+    raw_vm_bo: *mut bindings::drm_gpuvm_bo,
+) {
+    // SAFETY: Container invariant is guaranteed for objects using this callback.
+    let p = unsafe { crate::container_of!(raw_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+
+    // SAFETY: p is guaranteed to be valid for drm_gpuvm_bo objects using this callback.
+    unsafe { drop(Box::from_raw(p)) };
+}
+
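+// Trampolines for the drm_gpuvm sm_step_* callbacks: the C core decides how a
+// requested mapping change splits into map/remap/unmap steps and calls back
+// into the driver's step_map()/step_remap()/step_unmap() through these,
+// passing the StepContext through the opaque `_priv` pointer.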
+pub(super) unsafe extern "C" fn step_map_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a map op, and OpMap is a transparent wrapper.
+    let map = unsafe { &mut *((&mut (*op).__bindgen_anon_1.map) as *mut _ as *mut OpMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map(), which is
+    // guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_map(map, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+pub(super) unsafe extern "C" fn step_remap_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a remap op, and OpReMap is a transparent wrapper.
+    let remap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.remap) as *mut _ as *mut OpReMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline by the sm_map() or
+    // sm_unmap() caller, which is guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_remap(remap, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+pub(super) unsafe extern "C" fn step_unmap_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is an unmap op, and OpUnMap is a transparent wrapper.
+    let unmap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.unmap) as *mut _ as *mut OpUnMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline by the sm_map(),
+    // sm_unmap() or bo_unmap() caller, which is guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_unmap(unmap, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+pub(super) unsafe extern "C" fn exec_lock_gem_object(
+    vm_exec: *mut bindings::drm_gpuvm_exec,
+) -> core::ffi::c_int {
+    // SAFETY: The gpuvm_exec object is valid and priv_ is a GEM object pointer
+    // when this callback is used
+    unsafe { bindings::drm_exec_lock_obj(&mut (*vm_exec).exec, (*vm_exec).extra.priv_ as *mut _) }
+}
+
+impl<T: DriverGpuVm> GpuVm<T> {
+    const OPS: bindings::drm_gpuvm_ops = bindings::drm_gpuvm_ops {
+        vm_free: Some(vm_free_callback::<T>),
+        op_alloc: None,
+        op_free: None,
+        vm_bo_alloc: Some(vm_bo_alloc_callback::<T>),
+        vm_bo_free: Some(vm_bo_free_callback::<T>),
+        vm_bo_validate: None,
+        sm_step_map: Some(step_map_callback::<T>),
+        sm_step_remap: Some(step_remap_callback::<T>),
+        sm_step_unmap: Some(step_unmap_callback::<T>),
+    };
+
+    fn gpuvm(&self) -> *const bindings::drm_gpuvm {
+        self.gpuvm.get()
+    }
+
+    pub fn new<E>(
+        name: &'static CStr,
+        dev: &device::Device<T::Driver>,
+        r_obj: &<T::Driver as drv::Driver>::Object,
+        range: Range<u64>,
+        reserve_range: Range<u64>,
+        inner: impl PinInit<T, E>,
+    ) -> Result<ARef<GpuVm<T>>>
+    where
+        Error: From<E>,
+    {
+        let obj: Pin<Box<Self>> = Box::try_pin_init(
+            try_pin_init!(Self {
+                // SAFETY: drm_gpuvm_init cannot fail and always initializes the member
+                gpuvm <- unsafe {
+                    init::pin_init_from_closure(move |slot: *mut Opaque<bindings::drm_gpuvm>| {
+                        // Zero-init required by drm_gpuvm_init
+                        *slot = Opaque::zeroed();
+                        bindings::drm_gpuvm_init(
+                            Opaque::raw_get(slot),
+                            name.as_char_ptr(),
+                            0,
+                            dev.raw_mut(),
+                            r_obj.gem_obj() as *const _ as *mut _,
+                            range.start,
+                            range.end - range.start,
+                            reserve_range.start,
+                            reserve_range.end - reserve_range.start,
+                            &Self::OPS,
+                        );
+                        Ok(())
+                    })
+                },
+                // SAFETY: Just passing through to the initializer argument
+                inner <- unsafe {
+                    init::pin_init_from_closure(move |slot: *mut UnsafeCell<T>| {
+                        inner.__pinned_init(slot as *mut _)
+                    })
+                },
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )?;
+
+        // SAFETY: We never move out of the object
+        let vm_ref = unsafe {
+            ARef::from_raw(NonNull::new_unchecked(Box::leak(
+                Pin::into_inner_unchecked(obj),
+            )))
+        };
+
+        Ok(vm_ref)
+    }
+
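+    /// Locks the VM's reservation (and that of `obj`, if the object is
+    /// external to this VM), returning a [`LockedGpuVm`] guard that unlocks
+    /// again on drop.
+    ///
+    /// A sketch of the intended use; `vm`, `bo`, `ctx` and the map arguments
+    /// are illustrative driver values, not part of this patch:
+    ///
+    /// ```ignore
+    /// let mut locked = vm.exec_lock(Some(&bo))?;
+    /// let vm_bo = locked.obtain_bo()?;
+    /// locked.sm_map(&mut ctx, addr, range, offset)?;
+    /// // Dropping `locked` calls drm_gpuvm_exec_unlock().
+    /// ```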
+    pub fn exec_lock<'a, 'b>(
+        &'a self,
+        obj: Option<&'b <T::Driver as drv::Driver>::Object>,
+    ) -> Result<LockedGpuVm<'a, 'b, T>> {
+        // Do not try to lock the object if it is internal (since it is already locked).
+        let is_ext = obj.map(|a| self.is_extobj(a)).unwrap_or(false);
+
+        let mut guard = ManuallyDrop::new(LockedGpuVm {
+            gpuvm: self,
+            // vm_exec needs to be pinned, so stick it in a Box.
+            vm_exec: Box::init(
+                init!(bindings::drm_gpuvm_exec {
+                    vm: self.gpuvm() as *mut _,
+                    flags: bindings::BINDINGS_DRM_EXEC_INTERRUPTIBLE_WAIT,
+                    exec: Default::default(),
+                    extra: match (is_ext, obj) {
+                        (true, Some(obj)) => bindings::drm_gpuvm_exec__bindgen_ty_1 {
+                            fn_: Some(exec_lock_gem_object),
+                            priv_: obj.gem_obj() as *const _ as *mut _,
+                        },
+                        _ => Default::default(),
+                    },
+                    num_fences: 0,
+                }),
+                GFP_KERNEL,
+            )?,
+            obj,
+        });
+
+        // SAFETY: The object is valid and was initialized above
+        to_result(unsafe { bindings::drm_gpuvm_exec_lock(&mut *guard.vm_exec) })?;
+
+        Ok(ManuallyDrop::into_inner(guard))
+    }
+
+    /// Returns true if the given object is external to the GPUVM
+    /// (that is, if it does not share the DMA reservation object of the GPUVM).
+    pub fn is_extobj(&self, obj: &impl IntoGEMObject) -> bool {
+        let gem = obj.gem_obj() as *const _ as *mut _;
+        // SAFETY: This is safe to call as long as the arguments are valid pointers.
+        unsafe { bindings::drm_gpuvm_is_extobj(self.gpuvm() as *mut _, gem) }
+    }
+}
+
+// SAFETY: DRM GpuVm objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVm<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_get function satisfies the requirements for inc_ref().
+        unsafe { bindings::drm_gpuvm_get(&self.gpuvm as *const _ as *mut _) };
+    }
+
+    unsafe fn dec_ref(obj: NonNull<Self>) {
+        // SAFETY: The drm_gpuvm_put function satisfies the requirements for dec_ref().
+        unsafe { bindings::drm_gpuvm_put(Opaque::raw_get(&(*obj.as_ptr()).gpuvm)) };
+    }
+}
+
+pub struct LockedGpuVm<'a, 'b, T: DriverGpuVm> {
+    gpuvm: &'a GpuVm<T>,
+    vm_exec: Box<bindings::drm_gpuvm_exec>,
+    obj: Option<&'b <T::Driver as drv::Driver>::Object>,
+}
+
+impl<T: DriverGpuVm> LockedGpuVm<'_, '_, T> {
+    pub fn find_bo(&mut self) -> Option<ARef<GpuVmBo<T>>> {
+        let obj = self.obj?;
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        let p = unsafe {
+            bindings::drm_gpuvm_bo_find(
+                self.gpuvm.gpuvm() as *mut _,
+                obj.gem_obj() as *const _ as *mut _,
+            )
+        };
+        if p.is_null() {
+            None
+        } else {
+            // SAFETY: All the drm_gpuvm_bo objects in this GpuVm are always allocated by us as GpuVmBo<T>.
+            let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+            // SAFETY: We checked for NULL above, and the types ensure that
+            // this object was created by vm_bo_alloc_callback.
+            Some(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+        }
+    }
+
+    pub fn obtain_bo(&mut self) -> Result<ARef<GpuVmBo<T>>> {
+        let obj = self.obj.ok_or(EINVAL)?;
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        let p = unsafe {
+            bindings::drm_gpuvm_bo_obtain(
+                self.gpuvm.gpuvm() as *mut _,
+                obj.gem_obj() as *const _ as *mut _,
+            )
+        };
+        if p.is_null() {
+            Err(ENOMEM)
+        } else {
+            // SAFETY: Container invariant is guaranteed for GpuVmBo objects for this GpuVm.
+            let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+            // SAFETY: We checked for NULL above, and the types ensure that
+            // this object was created by vm_bo_alloc_callback.
+            Ok(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+        }
+    }
+
+    pub fn sm_map(
+        &mut self,
+        ctx: &mut T::StepContext,
+        req_addr: u64,
+        req_range: u64,
+        req_offset: u64,
+    ) -> Result {
+        let obj = self.obj.ok_or(EINVAL)?;
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_sm_map(
+                self.gpuvm.gpuvm() as *mut _,
+                &mut ctx as *mut _ as *mut _,
+                req_addr,
+                req_range,
+                obj.gem_obj() as *const _ as *mut _,
+                req_offset,
+            )
+        })
+    }
+
+    pub fn sm_unmap(&mut self, ctx: &mut T::StepContext, req_addr: u64, req_range: u64) -> Result {
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_sm_unmap(
+                self.gpuvm.gpuvm() as *mut _,
+                &mut ctx as *mut _ as *mut _,
+                req_addr,
+                req_range,
+            )
+        })
+    }
+
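+    /// Unmaps all mappings attached to the given VM/BO connection, invoking
+    /// the driver's step_unmap() for each of them.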
+    pub fn bo_unmap(&mut self, ctx: &mut T::StepContext, bo: &ARef<GpuVmBo<T>>) -> Result {
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_bo_unmap(&bo.bo as *const _ as *mut _, &mut ctx as *mut _ as *mut _)
+        })
+    }
+}
+
+impl<T: DriverGpuVm> Deref for LockedGpuVm<'_, '_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &*self.gpuvm.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> DerefMut for LockedGpuVm<'_, '_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &mut *self.gpuvm.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> Drop for LockedGpuVm<'_, '_, T> {
+    fn drop(&mut self) {
+        // SAFETY: We hold the lock, so it's safe to unlock
+        unsafe {
+            bindings::drm_gpuvm_exec_unlock(&mut *self.vm_exec);
+        }
+    }
+}
+
+pub struct UpdatingGpuVm<'a, T: DriverGpuVm>(&'a GpuVm<T>);
+
+impl<T: DriverGpuVm> UpdatingGpuVm<'_, T> {}
+
+impl<T: DriverGpuVm> Deref for UpdatingGpuVm<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &*self.0.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> DerefMut for UpdatingGpuVm<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &mut *self.0.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> core::ops::Receiver for UpdatingGpuVm<'_, T> {}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVm<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVm<T> {}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVmBo<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVmBo<T> {}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
index b1f182453ec1dc..50d1bb9139dcd3 100644
--- a/rust/kernel/drm/mod.rs
+++ b/rust/kernel/drm/mod.rs
@@ -6,6 +6,8 @@ pub mod device;
 pub mod drv;
 pub mod file;
 pub mod gem;
+#[cfg(CONFIG_DRM_GPUVM = "y")]
+pub mod gpuvm;
 pub mod ioctl;
 pub mod mm;
 pub mod sched;