Diffstat (limited to 'rust/kernel/drm')
 rust/kernel/drm/device.rs    |  76 +
 rust/kernel/drm/drv.rs       | 342 +
 rust/kernel/drm/file.rs      | 113 +
 rust/kernel/drm/gem/mod.rs   | 387 +
 rust/kernel/drm/gem/shmem.rs | 385 +
 rust/kernel/drm/ioctl.rs     | 147 +
 rust/kernel/drm/mm.rs        | 309 +
 rust/kernel/drm/mod.rs       |  12 +
 rust/kernel/drm/sched.rs     | 358 +
 rust/kernel/drm/syncobj.rs   |  77 +
 10 files changed, 2206 insertions(+), 0 deletions(-)
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
new file mode 100644
index 000000000000..6007f941137a
--- /dev/null
+++ b/rust/kernel/drm/device.rs
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM device.
+//!
+//! C header: [`include/drm/drm_device.h`](../../../../include/drm/drm_device.h)
+
+use crate::{bindings, device, drm, types::ForeignOwnable};
+use core::marker::PhantomData;
+
+/// Represents a reference to a DRM device. The device is reference-counted and is guaranteed to
+/// not be dropped while this object is alive.
+pub struct Device<T: drm::drv::Driver> {
+ // Type invariant: ptr must be a valid and initialized drm_device,
+ // and this value must either own a reference to it or the caller
+ // must ensure that it is never dropped if the reference is borrowed.
+ pub(super) ptr: *mut bindings::drm_device,
+ _p: PhantomData<T>,
+}
+
+impl<T: drm::drv::Driver> Device<T> {
+ // Not intended to be called externally, except via declare_drm_ioctls!()
+ #[doc(hidden)]
+ pub unsafe fn from_raw(raw: *mut bindings::drm_device) -> Device<T> {
+ Device {
+ ptr: raw,
+ _p: PhantomData,
+ }
+ }
+
+ #[allow(dead_code)]
+ pub(crate) fn raw(&self) -> *const bindings::drm_device {
+ self.ptr
+ }
+
+ pub(crate) fn raw_mut(&mut self) -> *mut bindings::drm_device {
+ self.ptr
+ }
+
+ /// Returns a borrowed reference to the user data associated with this Device.
+ pub fn data(&self) -> <T::Data as ForeignOwnable>::Borrowed<'_> {
+ unsafe { T::Data::borrow((*self.ptr).dev_private) }
+ }
+}
+
+impl<T: drm::drv::Driver> Drop for Device<T> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // relinquish it now.
+ unsafe { bindings::drm_dev_put(self.ptr) };
+ }
+}
+
+impl<T: drm::drv::Driver> Clone for Device<T> {
+ fn clone(&self) -> Self {
+ // SAFETY: We get a new reference and then create a new owning object from the raw pointer
+ unsafe {
+ bindings::drm_dev_get(self.ptr);
+ Device::from_raw(self.ptr)
+ }
+ }
+}
+
+// SAFETY: `Device` only holds a pointer to a C device, which is safe to be used from any thread.
+unsafe impl<T: drm::drv::Driver> Send for Device<T> {}
+
+// SAFETY: `Device` only holds a pointer to a C device, references to which are safe to be used
+// from any thread.
+unsafe impl<T: drm::drv::Driver> Sync for Device<T> {}
+
+// Make drm::Device work for dev_info!() and friends.
+// SAFETY: `raw_device` returns a valid pointer to the `struct device` embedded in `drm_device`.
+unsafe impl<T: drm::drv::Driver> device::RawDevice for Device<T> {
+ fn raw_device(&self) -> *mut bindings::device {
+ // SAFETY: ptr must be valid per the type invariant
+ unsafe { (*self.ptr).dev }
+ }
+}
diff --git a/rust/kernel/drm/drv.rs b/rust/kernel/drm/drv.rs
new file mode 100644
index 000000000000..c138352cb489
--- /dev/null
+++ b/rust/kernel/drm/drv.rs
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM driver core.
+//!
+//! C header: [`include/drm/drm_drv.h`](../../../../include/drm/drm_drv.h)
+
+use crate::{
+ bindings, device, drm,
+ error::code::*,
+ error::from_kernel_err_ptr,
+ error::{Error, Result},
+ prelude::*,
+ private::Sealed,
+ str::CStr,
+ types::ForeignOwnable,
+ ThisModule,
+};
+use core::{
+ marker::{PhantomData, PhantomPinned},
+ pin::Pin,
+};
+use macros::vtable;
+
+/// Driver uses the GEM memory manager. This should be set for all modern drivers.
+pub const FEAT_GEM: u32 = bindings::drm_driver_feature_DRIVER_GEM;
+/// Driver supports mode setting interfaces (KMS).
+pub const FEAT_MODESET: u32 = bindings::drm_driver_feature_DRIVER_MODESET;
+/// Driver supports dedicated render nodes.
+pub const FEAT_RENDER: u32 = bindings::drm_driver_feature_DRIVER_RENDER;
+/// Driver supports the full atomic modesetting userspace API.
+///
+/// Drivers which only use atomic internally, but do not support the full userspace API (e.g. not
+/// all properties converted to atomic, or multi-plane updates are not guaranteed to be tear-free)
+/// should not set this flag.
+pub const FEAT_ATOMIC: u32 = bindings::drm_driver_feature_DRIVER_ATOMIC;
+/// Driver supports DRM sync objects for explicit synchronization of command submission.
+pub const FEAT_SYNCOBJ: u32 = bindings::drm_driver_feature_DRIVER_SYNCOBJ;
+/// Driver supports the timeline flavor of DRM sync objects for explicit synchronization of command
+/// submission.
+pub const FEAT_SYNCOBJ_TIMELINE: u32 = bindings::drm_driver_feature_DRIVER_SYNCOBJ_TIMELINE;
+
+/// Metadata describing a DRM driver.
+pub struct DriverInfo {
+ /// Driver major version.
+ pub major: i32,
+ /// Driver minor version.
+ pub minor: i32,
+ /// Driver patchlevel version.
+ pub patchlevel: i32,
+ /// Driver name.
+ pub name: &'static CStr,
+ /// Driver description.
+ pub desc: &'static CStr,
+ /// Driver date.
+ pub date: &'static CStr,
+}
+
+/// Internal memory management operation set, normally created by memory managers (e.g. GEM).
+///
+/// See `kernel::drm::gem` and `kernel::drm::gem::shmem`.
+pub struct AllocOps {
+ pub(crate) gem_create_object: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ size: usize,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) prime_handle_to_fd: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ file_priv: *mut bindings::drm_file,
+ handle: u32,
+ flags: u32,
+ prime_fd: *mut core::ffi::c_int,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) prime_fd_to_handle: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ file_priv: *mut bindings::drm_file,
+ prime_fd: core::ffi::c_int,
+ handle: *mut u32,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) gem_prime_import: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ dma_buf: *mut bindings::dma_buf,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) gem_prime_import_sg_table: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ attach: *mut bindings::dma_buf_attachment,
+ sgt: *mut bindings::sg_table,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) gem_prime_mmap: Option<
+ unsafe extern "C" fn(
+ obj: *mut bindings::drm_gem_object,
+ vma: *mut bindings::vm_area_struct,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_create: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ args: *mut bindings::drm_mode_create_dumb,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_map_offset: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ handle: u32,
+ offset: *mut u64,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_destroy: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ handle: u32,
+ ) -> core::ffi::c_int,
+ >,
+}
+
+/// Trait for memory manager implementations. Implemented internally.
+pub trait AllocImpl: Sealed + drm::gem::IntoGEMObject {
+ /// The C callback operations for this memory manager.
+ const ALLOC_OPS: AllocOps;
+}
+
+/// A DRM driver implementation.
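+///
+/// # Examples
+///
+/// A minimal sketch of a driver implementation; `MyDriver`, `MyData`, `MyFile`
+/// and `MyGemObject` are hypothetical driver types:
+///
+/// ```
+/// use kernel::prelude::*;
+/// use kernel::{c_str, drm, sync::Arc};
+///
+/// struct MyDriver;
+///
+/// #[vtable]
+/// impl drm::drv::Driver for MyDriver {
+///     type Data = Arc<MyData>;
+///     type Object = drm::gem::shmem::Object<MyGemObject>;
+///     type File = MyFile;
+///
+///     const INFO: drm::drv::DriverInfo = drm::drv::DriverInfo {
+///         major: 0,
+///         minor: 1,
+///         patchlevel: 0,
+///         name: c_str!("mydrv"),
+///         desc: c_str!("An example DRM driver"),
+///         date: c_str!("20230101"),
+///     };
+///
+///     const FEATURES: u32 = drm::drv::FEAT_GEM | drm::drv::FEAT_RENDER;
+///
+///     kernel::declare_drm_ioctls! {}
+/// }
+/// ```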
+#[vtable]
+pub trait Driver {
+ /// Context data associated with the DRM driver
+ ///
+ /// Determines the type of the context data passed to each of the methods of the trait.
+ type Data: ForeignOwnable + Sync + Send;
+
+ /// The type used to manage memory for this driver.
+ ///
+ /// Should be either `drm::gem::Object<T>` or `drm::gem::shmem::Object<T>`.
+ type Object: AllocImpl;
+
+ /// The type used to represent a DRM File (client)
+ type File: drm::file::DriverFile;
+
+ /// Driver metadata
+ const INFO: DriverInfo;
+
+ /// Feature flags
+ const FEATURES: u32;
+
+ /// IOCTL list. See `kernel::drm::ioctl::declare_drm_ioctls!{}`.
+ const IOCTLS: &'static [drm::ioctl::DrmIoctlDescriptor];
+}
+
+/// A registration of a DRM device
+///
+/// # Invariants
+///
+/// `drm` is always a valid pointer to an allocated `drm_device`.
+pub struct Registration<T: Driver> {
+ drm: drm::device::Device<T>,
+ registered: bool,
+ fops: bindings::file_operations,
+ vtable: Pin<Box<bindings::drm_driver>>,
+ _p: PhantomData<T>,
+ _pin: PhantomPinned,
+}
+
+#[cfg(CONFIG_DRM_LEGACY)]
+macro_rules! drm_legacy_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::drm_driver {
+ $( $field: $val ),*,
+ firstopen: None,
+ preclose: None,
+ dma_ioctl: None,
+ dma_quiescent: None,
+ context_dtor: None,
+ irq_handler: None,
+ irq_preinstall: None,
+ irq_postinstall: None,
+ irq_uninstall: None,
+ get_vblank_counter: None,
+ enable_vblank: None,
+ disable_vblank: None,
+ dev_priv_size: 0,
+ }
+ }
+}
+
+#[cfg(not(CONFIG_DRM_LEGACY))]
+macro_rules! drm_legacy_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::drm_driver {
+ $( $field: $val ),*
+ }
+ }
+}
+
+/// Registers a DRM device with the rest of the kernel.
+///
+/// It automatically picks up THIS_MODULE.
+#[allow(clippy::crate_in_macro_def)]
+#[macro_export]
+macro_rules! drm_device_register {
+ ($reg:expr, $data:expr, $flags:expr $(,)?) => {{
+ $crate::drm::drv::Registration::register($reg, $data, $flags, &crate::THIS_MODULE)
+ }};
+}
+
+impl<T: Driver> Registration<T> {
+ const VTABLE: bindings::drm_driver = drm_legacy_fields! {
+ load: None,
+ open: Some(drm::file::open_callback::<T::File>),
+ postclose: Some(drm::file::postclose_callback::<T::File>),
+ lastclose: None,
+ unload: None,
+ release: None,
+ master_set: None,
+ master_drop: None,
+ debugfs_init: None,
+ gem_create_object: T::Object::ALLOC_OPS.gem_create_object,
+ prime_handle_to_fd: T::Object::ALLOC_OPS.prime_handle_to_fd,
+ prime_fd_to_handle: T::Object::ALLOC_OPS.prime_fd_to_handle,
+ gem_prime_import: T::Object::ALLOC_OPS.gem_prime_import,
+ gem_prime_import_sg_table: T::Object::ALLOC_OPS.gem_prime_import_sg_table,
+ gem_prime_mmap: T::Object::ALLOC_OPS.gem_prime_mmap,
+ dumb_create: T::Object::ALLOC_OPS.dumb_create,
+ dumb_map_offset: T::Object::ALLOC_OPS.dumb_map_offset,
+ dumb_destroy: T::Object::ALLOC_OPS.dumb_destroy,
+
+ major: T::INFO.major,
+ minor: T::INFO.minor,
+ patchlevel: T::INFO.patchlevel,
+ name: T::INFO.name.as_char_ptr() as *mut _,
+ desc: T::INFO.desc.as_char_ptr() as *mut _,
+ date: T::INFO.date.as_char_ptr() as *mut _,
+
+ driver_features: T::FEATURES,
+ ioctls: T::IOCTLS.as_ptr(),
+ num_ioctls: T::IOCTLS.len() as i32,
+ fops: core::ptr::null_mut(),
+ };
+
+ /// Creates a new [`Registration`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new(parent: &dyn device::RawDevice) -> Result<Self> {
+        let vtable = Pin::new(Box::try_new(Self::VTABLE)?);
+        // SAFETY: `parent` is a valid device pointer, and `vtable` is a valid `drm_driver` that
+        // lives as long as the registration.
+        let raw_drm = unsafe { bindings::drm_dev_alloc(&*vtable, parent.raw_device()) };
+        let raw_drm = from_kernel_err_ptr(raw_drm)?;
+
+ // The reference count is one, and now we take ownership of that reference as a
+ // drm::device::Device.
+ let drm = unsafe { drm::device::Device::from_raw(raw_drm) };
+
+ Ok(Self {
+ drm,
+ registered: false,
+ vtable,
+ fops: drm::gem::create_fops(),
+ _pin: PhantomPinned,
+ _p: PhantomData,
+ })
+ }
+
+ /// Registers a DRM device with the rest of the kernel.
+ ///
+ /// Users are encouraged to use the [`drm_device_register!()`] macro because it automatically
+ /// picks up the current module.
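+    ///
+    /// # Examples
+    ///
+    /// A sketch of typical usage from a probe path; `MyDriver`, `parent` and
+    /// `my_data` are hypothetical:
+    ///
+    /// ```
+    /// let reg = drm::drv::Registration::<MyDriver>::new(&parent)?;
+    /// // Pin the registration before registering it.
+    /// let mut reg = Pin::from(Box::try_new(reg)?);
+    /// kernel::drm_device_register!(reg.as_mut(), my_data, 0)?;
+    /// ```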
+ pub fn register(
+ self: Pin<&mut Self>,
+ data: T::Data,
+ flags: usize,
+ module: &'static ThisModule,
+ ) -> Result {
+ if self.registered {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ let data_pointer = <T::Data as ForeignOwnable>::into_foreign(data);
+ // SAFETY: `drm` is valid per the type invariant
+ unsafe {
+ (*this.drm.raw_mut()).dev_private = data_pointer as *mut _;
+ }
+
+ this.fops.owner = module.0;
+ this.vtable.fops = &this.fops;
+
+ // SAFETY: The device is now initialized and ready to be registered.
+ let ret = unsafe { bindings::drm_dev_register(this.drm.raw_mut(), flags as u64) };
+ if ret < 0 {
+ // SAFETY: `data_pointer` was returned by `into_foreign` above.
+ unsafe { T::Data::from_foreign(data_pointer) };
+ return Err(Error::from_kernel_errno(ret));
+ }
+
+ this.registered = true;
+ Ok(())
+ }
+
+ /// Returns a reference to the `Device` instance for this registration.
+ pub fn device(&self) -> &drm::device::Device<T> {
+ &self.drm
+ }
+}
+
+// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between threads
+// or CPUs, so it is safe to share it.
+unsafe impl<T: Driver> Sync for Registration<T> {}
+
+// SAFETY: Registration with and unregistration from the drm subsystem can happen from any thread.
+// Additionally, `T::Data` (which is dropped during unregistration) is `Send`, so it is ok to move
+// `Registration` to different threads.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: Driver> Send for Registration<T> {}
+
+impl<T: Driver> Drop for Registration<T> {
+ /// Removes the registration from the kernel if it has completed successfully before.
+ fn drop(&mut self) {
+ if self.registered {
+ // Get a pointer to the data stored in device before destroying it.
+ // SAFETY: `drm` is valid per the type invariant
+ let data_pointer = unsafe { (*self.drm.raw_mut()).dev_private };
+
+ // SAFETY: Since `registered` is true, `self.drm` is both valid and registered.
+ unsafe { bindings::drm_dev_unregister(self.drm.raw_mut()) };
+
+ // Free data as well.
+ // SAFETY: `data_pointer` was returned by `into_foreign` during registration.
+ unsafe { <T::Data as ForeignOwnable>::from_foreign(data_pointer) };
+ }
+ }
+}
diff --git a/rust/kernel/drm/file.rs b/rust/kernel/drm/file.rs
new file mode 100644
index 000000000000..48751e93c38a
--- /dev/null
+++ b/rust/kernel/drm/file.rs
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM File objects.
+//!
+//! C header: [`include/drm/drm_file.h`](../../../../include/drm/drm_file.h)
+
+use crate::{bindings, drm, error::Result};
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::ops::Deref;
+
+/// Trait that must be implemented by DRM drivers to represent a DRM File (a client instance).
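+///
+/// # Examples
+///
+/// A minimal sketch of a per-client state type; `MyFile` and `MyDriver` are
+/// hypothetical:
+///
+/// ```
+/// struct MyFile;
+///
+/// impl drm::file::DriverFile for MyFile {
+///     type Driver = MyDriver;
+///
+///     fn open(_device: &drm::device::Device<Self::Driver>) -> Result<Box<Self>> {
+///         Ok(Box::try_new(MyFile)?)
+///     }
+/// }
+/// ```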
+pub trait DriverFile {
+ /// The parent `Driver` implementation for this `DriverFile`.
+ type Driver: drm::drv::Driver;
+
+ /// Open a new file (called when a client opens the DRM device).
+ fn open(device: &drm::device::Device<Self::Driver>) -> Result<Box<Self>>;
+}
+
+/// An open DRM File.
+///
+/// # Invariants
+/// `raw` is a valid pointer to a `drm_file` struct.
+#[repr(transparent)]
+pub struct File<T: DriverFile> {
+ raw: *mut bindings::drm_file,
+ _p: PhantomData<T>,
+}
+
+pub(super) unsafe extern "C" fn open_callback<T: DriverFile>(
+ raw_dev: *mut bindings::drm_device,
+ raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+    // SAFETY: The DRM core passes a valid `drm_device` pointer; we never drop this reference.
+    let drm = core::mem::ManuallyDrop::new(unsafe { drm::device::Device::from_raw(raw_dev) });
+ // SAFETY: This reference won't escape this function
+ let file = unsafe { &mut *raw_file };
+
+ let inner = match T::open(&drm) {
+ Err(e) => {
+ return e.to_kernel_errno();
+ }
+ Ok(i) => i,
+ };
+
+ file.driver_priv = Box::into_raw(inner) as *mut _;
+
+ 0
+}
+
+pub(super) unsafe extern "C" fn postclose_callback<T: DriverFile>(
+ _dev: *mut bindings::drm_device,
+ raw_file: *mut bindings::drm_file,
+) {
+ // SAFETY: This reference won't escape this function
+ let file = unsafe { &*raw_file };
+
+ // Drop the DriverFile
+ unsafe { Box::from_raw(file.driver_priv as *mut T) };
+}
+
+impl<T: DriverFile> File<T> {
+ // Not intended to be called externally, except via declare_drm_ioctls!()
+ #[doc(hidden)]
+ pub unsafe fn from_raw(raw_file: *mut bindings::drm_file) -> File<T> {
+ File {
+ raw: raw_file,
+ _p: PhantomData,
+ }
+ }
+
+ #[allow(dead_code)]
+ /// Return the raw pointer to the underlying `drm_file`.
+ pub(super) fn raw(&self) -> *const bindings::drm_file {
+ self.raw
+ }
+
+    /// Return an immutable reference to the raw `drm_file` structure.
+    pub(super) fn file(&self) -> &bindings::drm_file {
+        // SAFETY: `raw` is valid per the type invariant.
+        unsafe { &*self.raw }
+    }
+}
+
+impl<T: DriverFile> Deref for File<T> {
+ type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: `driver_priv` was set to a valid `Box<T>` pointer in `open_callback`.
+        unsafe { &*(self.file().driver_priv as *const T) }
+    }
+}
+
+impl<T: DriverFile> crate::private::Sealed for File<T> {}
+
+/// Generic trait to allow users that don't care about driver specifics to accept any `File<T>`.
+///
+/// # Safety
+///
+/// Must only be implemented for `File<T>` and return the pointer, following the normal invariants
+/// of that type.
+pub unsafe trait GenericFile: crate::private::Sealed {
+ /// Returns the raw const pointer to the `struct drm_file`
+ fn raw(&self) -> *const bindings::drm_file;
+ /// Returns the raw mut pointer to the `struct drm_file`
+ fn raw_mut(&mut self) -> *mut bindings::drm_file;
+}
+
+unsafe impl<T: DriverFile> GenericFile for File<T> {
+ fn raw(&self) -> *const bindings::drm_file {
+ self.raw
+ }
+ fn raw_mut(&mut self) -> *mut bindings::drm_file {
+ self.raw
+ }
+}
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
new file mode 100644
index 000000000000..c4a42bb2f718
--- /dev/null
+++ b/rust/kernel/drm/gem/mod.rs
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM GEM API
+//!
+//! C header: [`include/drm/drm_gem.h`](../../../../include/drm/drm_gem.h)
+
+#[cfg(CONFIG_DRM_GEM_SHMEM_HELPER = "y")]
+pub mod shmem;
+
+use alloc::boxed::Box;
+
+use crate::{
+ bindings,
+ drm::{device, drv, file},
+ error::{to_result, Result},
+ prelude::*,
+};
+use core::{mem, mem::ManuallyDrop, ops::Deref, ops::DerefMut};
+
+/// GEM object functions, which must be implemented by drivers.
+pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+ /// Create a new driver data object for a GEM object of a given size.
+ fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<Self>;
+
+ /// Open a new handle to an existing object, associated with a File.
+ fn open(
+ _obj: &<<T as IntoGEMObject>::Driver as drv::Driver>::Object,
+ _file: &file::File<<<T as IntoGEMObject>::Driver as drv::Driver>::File>,
+ ) -> Result {
+ Ok(())
+ }
+
+ /// Close a handle to an existing object, associated with a File.
+ fn close(
+ _obj: &<<T as IntoGEMObject>::Driver as drv::Driver>::Object,
+ _file: &file::File<<<T as IntoGEMObject>::Driver as drv::Driver>::File>,
+ ) {
+ }
+}
+
+/// Trait that represents a GEM object subtype
+pub trait IntoGEMObject: Sized + crate::private::Sealed {
+ /// Owning driver for this type
+ type Driver: drv::Driver;
+
+ /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
+ /// this owning object is valid.
+ fn gem_obj(&self) -> &bindings::drm_gem_object;
+
+ /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
+ /// this owning object is valid.
+ fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object;
+
+ /// Converts a pointer to a `drm_gem_object` into a pointer to this type.
+ fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Self;
+}
+
+/// Trait which must be implemented by drivers using base GEM objects.
+pub trait DriverObject: BaseDriverObject<Object<Self>> {
+ /// Parent `Driver` for this object.
+ type Driver: drv::Driver;
+}
+
+unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
+ // SAFETY: All of our objects are Object<T>.
+ let this = crate::container_of!(obj, Object<T>, obj) as *mut Object<T>;
+
+ // SAFETY: The pointer we got has to be valid
+ unsafe { bindings::drm_gem_object_release(obj) };
+
+ // SAFETY: All of our objects are allocated via Box<>, and we're in the
+ // free callback which guarantees this object has zero remaining references,
+ // so we can drop it
+ unsafe { Box::from_raw(this) };
+}
+
+unsafe extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+ raw_obj: *mut bindings::drm_gem_object,
+ raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+ // SAFETY: The pointer we got has to be valid.
+ let file = unsafe {
+ file::File::<<<U as IntoGEMObject>::Driver as drv::Driver>::File>::from_raw(raw_file)
+ };
+ let obj =
+ <<<U as IntoGEMObject>::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(
+ raw_obj,
+ );
+
+ // SAFETY: from_gem_obj() returns a valid pointer as long as the type is
+ // correct and the raw_obj we got is valid.
+ match T::open(unsafe { &*obj }, &file) {
+ Err(e) => e.to_kernel_errno(),
+ Ok(()) => 0,
+ }
+}
+
+unsafe extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+ raw_obj: *mut bindings::drm_gem_object,
+ raw_file: *mut bindings::drm_file,
+) {
+ // SAFETY: The pointer we got has to be valid.
+ let file = unsafe {
+ file::File::<<<U as IntoGEMObject>::Driver as drv::Driver>::File>::from_raw(raw_file)
+ };
+ let obj =
+ <<<U as IntoGEMObject>::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(
+ raw_obj,
+ );
+
+ // SAFETY: from_gem_obj() returns a valid pointer as long as the type is
+ // correct and the raw_obj we got is valid.
+ T::close(unsafe { &*obj }, &file);
+}
+
+impl<T: DriverObject> IntoGEMObject for Object<T> {
+ type Driver = T::Driver;
+
+ fn gem_obj(&self) -> &bindings::drm_gem_object {
+ &self.obj
+ }
+
+ fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object {
+ &mut self.obj
+ }
+
+ fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
+ crate::container_of!(obj, Object<T>, obj) as *mut Object<T>
+ }
+}
+
+/// Base operations shared by all GEM object classes
+pub trait BaseObject: IntoGEMObject {
+ /// Returns the size of the object in bytes.
+ fn size(&self) -> usize {
+ self.gem_obj().size
+ }
+
+ /// Sets the exportable flag, which controls whether the object can be exported via PRIME.
+ fn set_exportable(&mut self, exportable: bool) {
+ self.mut_gem_obj().exportable = exportable;
+ }
+
+ /// Creates a new reference to the object.
+ fn reference(&self) -> ObjectRef<Self> {
+ // SAFETY: Having a reference to an Object implies holding a GEM reference
+ unsafe {
+ bindings::drm_gem_object_get(self.gem_obj() as *const _ as *mut _);
+ }
+ ObjectRef {
+ ptr: self as *const _,
+ }
+ }
+
+ /// Creates a new handle for the object associated with a given `File`
+ /// (or returns an existing one).
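+    ///
+    /// # Examples
+    ///
+    /// A sketch from an ioctl handler; `obj`, `file` and `MyObjectType` are
+    /// hypothetical values and types available in that context:
+    ///
+    /// ```
+    /// let handle = obj.create_handle(&file)?;
+    /// // The handle can later be resolved back to an object reference.
+    /// let obj_ref = MyObjectType::lookup_handle(&file, handle)?;
+    /// ```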
+ fn create_handle(
+ &self,
+ file: &file::File<<<Self as IntoGEMObject>::Driver as drv::Driver>::File>,
+ ) -> Result<u32> {
+ let mut handle: u32 = 0;
+ // SAFETY: The arguments are all valid per the type invariants.
+ to_result(unsafe {
+ bindings::drm_gem_handle_create(
+ file.raw() as *mut _,
+ self.gem_obj() as *const _ as *mut _,
+ &mut handle,
+ )
+ })?;
+ Ok(handle)
+ }
+
+ /// Looks up an object by its handle for a given `File`.
+ fn lookup_handle(
+ file: &file::File<<<Self as IntoGEMObject>::Driver as drv::Driver>::File>,
+ handle: u32,
+ ) -> Result<ObjectRef<Self>> {
+ // SAFETY: The arguments are all valid per the type invariants.
+ let ptr = unsafe { bindings::drm_gem_object_lookup(file.raw() as *mut _, handle) };
+
+ if ptr.is_null() {
+ Err(ENOENT)
+ } else {
+ Ok(ObjectRef {
+ ptr: ptr as *const _,
+ })
+ }
+ }
+
+ /// Creates an mmap offset to map the object from userspace.
+ fn create_mmap_offset(&self) -> Result<u64> {
+ // SAFETY: The arguments are valid per the type invariant.
+ to_result(unsafe {
+ // TODO: is this threadsafe?
+ bindings::drm_gem_create_mmap_offset(self.gem_obj() as *const _ as *mut _)
+ })?;
+        // SAFETY: The mmap offset was just created above, so `vma_node` is initialized.
+        Ok(unsafe {
+            bindings::drm_vma_node_offset_addr(&self.gem_obj().vma_node as *const _ as *mut _)
+        })
+ }
+}
+
+impl<T: IntoGEMObject> BaseObject for T {}
+
+/// A base GEM object.
+#[repr(C)]
+pub struct Object<T: DriverObject> {
+ obj: bindings::drm_gem_object,
+ // The DRM core ensures the Device exists as long as its objects exist, so we don't need to
+ // manage the reference count here.
+ dev: ManuallyDrop<device::Device<T::Driver>>,
+ inner: T,
+}
+
+impl<T: DriverObject> Object<T> {
+ /// The size of this object's structure.
+ pub const SIZE: usize = mem::size_of::<Self>();
+
+ const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+ free: Some(free_callback::<T>),
+ open: Some(open_callback::<T, Object<T>>),
+ close: Some(close_callback::<T, Object<T>>),
+ print_info: None,
+ export: None,
+ pin: None,
+ unpin: None,
+ get_sg_table: None,
+ vmap: None,
+ vunmap: None,
+ mmap: None,
+ vm_ops: core::ptr::null_mut(),
+ };
+
+ /// Create a new GEM object.
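+    ///
+    /// # Examples
+    ///
+    /// A sketch, assuming a hypothetical `MyObject: DriverObject` type and a
+    /// `dev` device reference:
+    ///
+    /// ```
+    /// let unique = gem::Object::<MyObject>::new(&dev, 4096)?;
+    /// // Downgrade the unique reference to a shared, reference-counted one.
+    /// let shared = unique.into_ref();
+    /// ```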
+ pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<UniqueObjectRef<Self>> {
+ let mut obj: Box<Self> = Box::try_new(Self {
+ // SAFETY: This struct is expected to be zero-initialized
+ obj: unsafe { mem::zeroed() },
+ // SAFETY: The drm subsystem guarantees that the drm_device will live as long as
+ // the GEM object lives, so we can conjure a reference out of thin air.
+ dev: ManuallyDrop::new(unsafe { device::Device::from_raw(dev.ptr) }),
+ inner: T::new(dev, size)?,
+ })?;
+
+        obj.obj.funcs = &Self::OBJECT_FUNCS;
+        // SAFETY: `dev` is valid per the type invariant, and `obj` was just allocated above.
+        to_result(unsafe {
+            bindings::drm_gem_object_init(dev.raw() as *mut _, &mut obj.obj, size)
+        })?;
+
+ let obj_ref = UniqueObjectRef {
+ ptr: Box::leak(obj),
+ };
+
+ Ok(obj_ref)
+ }
+
+ /// Returns the `Device` that owns this GEM object.
+ pub fn dev(&self) -> &device::Device<T::Driver> {
+ &self.dev
+ }
+}
+
+impl<T: DriverObject> crate::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> Deref for Object<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T: DriverObject> DerefMut for Object<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+impl<T: DriverObject> drv::AllocImpl for Object<T> {
+ const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
+ gem_create_object: None,
+ prime_handle_to_fd: Some(bindings::drm_gem_prime_handle_to_fd),
+ prime_fd_to_handle: Some(bindings::drm_gem_prime_fd_to_handle),
+ gem_prime_import: None,
+ gem_prime_import_sg_table: None,
+ gem_prime_mmap: Some(bindings::drm_gem_prime_mmap),
+ dumb_create: None,
+ dumb_map_offset: None,
+ dumb_destroy: None,
+ };
+}
+
+/// A reference-counted shared reference to a base GEM object.
+pub struct ObjectRef<T: IntoGEMObject> {
+ // Invariant: the pointer is valid and initialized, and this ObjectRef owns a reference to it.
+ ptr: *const T,
+}
+
+// SAFETY: GEM object references are safe to share between threads.
+unsafe impl<T: IntoGEMObject> Send for ObjectRef<T> {}
+unsafe impl<T: IntoGEMObject> Sync for ObjectRef<T> {}
+
+impl<T: IntoGEMObject> Clone for ObjectRef<T> {
+ fn clone(&self) -> Self {
+ self.reference()
+ }
+}
+
+impl<T: IntoGEMObject> Drop for ObjectRef<T> {
+ fn drop(&mut self) {
+ // SAFETY: Having an ObjectRef implies holding a GEM reference.
+ // The free callback will take care of deallocation.
+ unsafe {
+ bindings::drm_gem_object_put((*self.ptr).gem_obj() as *const _ as *mut _);
+ }
+ }
+}
+
+impl<T: IntoGEMObject> Deref for ObjectRef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The pointer is valid per the invariant
+ unsafe { &*self.ptr }
+ }
+}
+
+/// A unique reference to a base GEM object.
+pub struct UniqueObjectRef<T: IntoGEMObject> {
+    // Invariant: the pointer is valid and initialized, and this UniqueObjectRef owns the only
+    // reference to it.
+ ptr: *mut T,
+}
+
+impl<T: IntoGEMObject> UniqueObjectRef<T> {
+ /// Downgrade this reference to a shared reference.
+ pub fn into_ref(self) -> ObjectRef<T> {
+ let ptr = self.ptr as *const _;
+ core::mem::forget(self);
+
+ ObjectRef { ptr }
+ }
+}
+
+impl<T: IntoGEMObject> Drop for UniqueObjectRef<T> {
+ fn drop(&mut self) {
+ // SAFETY: Having a UniqueObjectRef implies holding a GEM
+ // reference. The free callback will take care of deallocation.
+ unsafe {
+ bindings::drm_gem_object_put((*self.ptr).gem_obj() as *const _ as *mut _);
+ }
+ }
+}
+
+impl<T: IntoGEMObject> Deref for UniqueObjectRef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The pointer is valid per the invariant
+ unsafe { &*self.ptr }
+ }
+}
+
+impl<T: IntoGEMObject> DerefMut for UniqueObjectRef<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The pointer is valid per the invariant
+ unsafe { &mut *self.ptr }
+ }
+}
+
+/// Returns the default `file_operations` for DRM drivers using GEM.
+pub(super) fn create_fops() -> bindings::file_operations {
+ bindings::file_operations {
+ owner: core::ptr::null_mut(),
+ open: Some(bindings::drm_open),
+ release: Some(bindings::drm_release),
+ unlocked_ioctl: Some(bindings::drm_ioctl),
+ #[cfg(CONFIG_COMPAT)]
+ compat_ioctl: Some(bindings::drm_compat_ioctl),
+ #[cfg(not(CONFIG_COMPAT))]
+ compat_ioctl: None,
+ poll: Some(bindings::drm_poll),
+ read: Some(bindings::drm_read),
+ llseek: Some(bindings::noop_llseek),
+ mmap: Some(bindings::drm_gem_mmap),
+ ..Default::default()
+ }
+}
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs
new file mode 100644
index 000000000000..8f17eba0be99
--- /dev/null
+++ b/rust/kernel/drm/gem/shmem.rs
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! DRM GEM shmem helper objects
+//!
+//! C header: [`include/drm/drm_gem_shmem_helper.h`](../../../../include/drm/drm_gem_shmem_helper.h)
+
+use crate::drm::{device, drv, gem};
+use crate::{
+    bindings,
+    error::{from_kernel_err_ptr, to_result},
+    prelude::*,
+};
+use core::{
+ marker::PhantomData,
+ mem,
+ mem::{ManuallyDrop, MaybeUninit},
+ ops::{Deref, DerefMut},
+ ptr::addr_of_mut,
+ slice,
+};
+
+use gem::BaseObject;
+
+/// Trait which must be implemented by drivers using shmem-backed GEM objects.
+pub trait DriverObject: gem::BaseDriverObject<Object<Self>> {
+ /// Parent `Driver` for this object.
+ type Driver: drv::Driver;
+}
+
+// FIXME: This is terrible and I don't know how to avoid it
+#[cfg(CONFIG_NUMA)]
+macro_rules! vm_numa_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::vm_operations_struct {
+ $( $field: $val ),*,
+ set_policy: None,
+ get_policy: None,
+ }
+ }
+}
+
+#[cfg(not(CONFIG_NUMA))]
+macro_rules! vm_numa_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::vm_operations_struct {
+ $( $field: $val ),*
+ }
+ }
+}
+
+const SHMEM_VM_OPS: bindings::vm_operations_struct = vm_numa_fields! {
+ open: Some(bindings::drm_gem_shmem_vm_open),
+ close: Some(bindings::drm_gem_shmem_vm_close),
+ may_split: None,
+ mremap: None,
+ mprotect: None,
+ fault: Some(bindings::drm_gem_shmem_fault),
+ huge_fault: None,
+ map_pages: None,
+ pagesize: None,
+ page_mkwrite: None,
+ pfn_mkwrite: None,
+ access: None,
+ name: None,
+ find_special_page: None,
+};
+
+/// A shmem-backed GEM object.
+#[repr(C)]
+pub struct Object<T: DriverObject> {
+ obj: bindings::drm_gem_shmem_object,
+ // The DRM core ensures the Device exists as long as its objects exist, so we don't need to
+ // manage the reference count here.
+ dev: ManuallyDrop<device::Device<T::Driver>>,
+ inner: T,
+}
+
+unsafe extern "C" fn gem_create_object<T: DriverObject>(
+ raw_dev: *mut bindings::drm_device,
+ size: usize,
+) -> *mut bindings::drm_gem_object {
+ // SAFETY: GEM ensures the device lives as long as its objects live,
+ // so we can conjure up a reference from thin air and never drop it.
+ let dev = ManuallyDrop::new(unsafe { device::Device::from_raw(raw_dev) });
+
+ let inner = match T::new(&*dev, size) {
+ Ok(v) => v,
+ Err(e) => return e.to_ptr(),
+ };
+
+    // SAFETY: Calling `krealloc` with a NULL pointer allocates a new, zeroed buffer of the
+    // requested size.
+    let p = unsafe {
+ bindings::krealloc(
+ core::ptr::null(),
+ Object::<T>::SIZE,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ ) as *mut Object<T>
+ };
+
+ if p.is_null() {
+ return ENOMEM.to_ptr();
+ }
+
+ // SAFETY: p is valid as long as the alloc succeeded
+ unsafe {
+ addr_of_mut!((*p).dev).write(dev);
+ addr_of_mut!((*p).inner).write(inner);
+ }
+
+ // SAFETY: drm_gem_shmem_object is safe to zero-init, and
+ // the rest of Object has been initialized
+ let new: &mut Object<T> = unsafe { &mut *(p as *mut _) };
+
+ new.obj.base.funcs = &Object::<T>::VTABLE;
+ &mut new.obj.base
+}
+
+unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
+ // SAFETY: All of our objects are Object<T>.
+ let p = crate::container_of!(obj, Object<T>, obj) as *mut Object<T>;
+
+ // SAFETY: p is never used after this
+ unsafe {
+ core::ptr::drop_in_place(&mut (*p).inner);
+ }
+
+ // SAFETY: This pointer has to be valid, since p is valid
+ unsafe {
+ bindings::drm_gem_shmem_free(&mut (*p).obj);
+ }
+}
+
+impl<T: DriverObject> Object<T> {
+ /// The size of this object's structure.
+ const SIZE: usize = mem::size_of::<Self>();
+
+ /// `drm_gem_object_funcs` vtable suitable for GEM shmem objects.
+ const VTABLE: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+ free: Some(free_callback::<T>),
+ open: Some(super::open_callback::<T, Object<T>>),
+ close: Some(super::close_callback::<T, Object<T>>),
+ print_info: Some(bindings::drm_gem_shmem_object_print_info),
+ export: None,
+ pin: Some(bindings::drm_gem_shmem_object_pin),
+ unpin: Some(bindings::drm_gem_shmem_object_unpin),
+ get_sg_table: Some(bindings::drm_gem_shmem_object_get_sg_table),
+ vmap: Some(bindings::drm_gem_shmem_object_vmap),
+ vunmap: Some(bindings::drm_gem_shmem_object_vunmap),
+ mmap: Some(bindings::drm_gem_shmem_object_mmap),
+ vm_ops: &SHMEM_VM_OPS,
+ };
+
+ // SAFETY: Must only be used with DRM functions that are thread-safe
+ unsafe fn mut_shmem(&self) -> *mut bindings::drm_gem_shmem_object {
+ &self.obj as *const _ as *mut _
+ }
+
+ /// Create a new shmem-backed DRM object of the given size.
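+    ///
+    /// # Examples
+    ///
+    /// A sketch of creating an object and zeroing it through a kernel mapping;
+    /// `MyObject` and `dev` are hypothetical:
+    ///
+    /// ```
+    /// let obj = shmem::Object::<MyObject>::new(&dev, 4096)?;
+    /// let mut vmap = obj.vmap()?;
+    /// vmap.as_mut_slice().fill(0);
+    /// ```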
+ pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<gem::UniqueObjectRef<Self>> {
+ // SAFETY: This function can be called as long as the ALLOC_OPS are set properly
+ // for this driver, and the gem_create_object is called.
+        let p = from_kernel_err_ptr(unsafe {
+            bindings::drm_gem_shmem_create(dev.raw() as *mut _, size)
+        })?;
+        let p = crate::container_of!(p, Object<T>, obj) as *mut _;
+
+ // SAFETY: The gem_create_object callback ensures this is a valid Object<T>,
+ // so we can take a unique reference to it.
+ let obj_ref = gem::UniqueObjectRef { ptr: p };
+
+ Ok(obj_ref)
+ }
+
+ /// Returns the `Device` that owns this GEM object.
+ pub fn dev(&self) -> &device::Device<T::Driver> {
+ &self.dev
+ }
+
+ /// Creates (if necessary) and returns a scatter-gather table of DMA pages for this object.
+ ///
+ /// This will pin the object in memory.
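+    ///
+    /// # Examples
+    ///
+    /// A sketch of walking the table, e.g. to program device page tables; `obj`
+    /// and `map_span` are hypothetical:
+    ///
+    /// ```
+    /// let sgt = obj.sg_table()?;
+    /// for entry in &sgt {
+    ///     map_span(entry.dma_address(), entry.dma_len())?;
+    /// }
+    /// ```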
+ pub fn sg_table(&self) -> Result<SGTable<T>> {
+ // SAFETY: drm_gem_shmem_get_pages_sgt is thread-safe.
+ let sgt = from_kernel_err_ptr(unsafe {
+ bindings::drm_gem_shmem_get_pages_sgt(self.mut_shmem())
+ })?;
+
+ Ok(SGTable {
+ sgt,
+ _owner: self.reference(),
+ })
+ }
+
+ /// Creates and returns a virtual kernel memory mapping for this object.
+ pub fn vmap(&self) -> Result<VMap<T>> {
+ let mut map: MaybeUninit<bindings::iosys_map> = MaybeUninit::uninit();
+
+ // SAFETY: drm_gem_shmem_vmap is thread-safe
+ to_result(unsafe { bindings::drm_gem_shmem_vmap(self.mut_shmem(), map.as_mut_ptr()) })?;
+
+ // SAFETY: if drm_gem_shmem_vmap did not fail, map is initialized now
+ let map = unsafe { map.assume_init() };
+
+ Ok(VMap {
+ map,
+ owner: self.reference(),
+ })
+ }
+
+ /// Set the write-combine flag for this object.
+ ///
+ /// Should be called before any mappings are made.
+ pub fn set_wc(&mut self, map_wc: bool) {
+        // SAFETY: We have exclusive access to the object, so updating `map_wc` is safe.
+        unsafe { (*self.mut_shmem()).map_wc = map_wc };
+ }
+}
+
+impl<T: DriverObject> Deref for Object<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T: DriverObject> DerefMut for Object<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+impl<T: DriverObject> crate::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> gem::IntoGEMObject for Object<T> {
+ type Driver = T::Driver;
+
+ fn gem_obj(&self) -> &bindings::drm_gem_object {
+ &self.obj.base
+ }
+
+ fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object {
+ &mut self.obj.base
+ }
+
+ fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
+ crate::container_of!(obj, Object<T>, obj) as *mut Object<T>
+ }
+}
+
+impl<T: DriverObject> drv::AllocImpl for Object<T> {
+ const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
+ gem_create_object: Some(gem_create_object::<T>),
+ prime_handle_to_fd: Some(bindings::drm_gem_prime_handle_to_fd),
+ prime_fd_to_handle: Some(bindings::drm_gem_prime_fd_to_handle),
+ gem_prime_import: None,
+ gem_prime_import_sg_table: Some(bindings::drm_gem_shmem_prime_import_sg_table),
+ gem_prime_mmap: Some(bindings::drm_gem_prime_mmap),
+ dumb_create: Some(bindings::drm_gem_shmem_dumb_create),
+ dumb_map_offset: None,
+ dumb_destroy: None,
+ };
+}
+
+/// A virtual mapping for a shmem-backed GEM object in kernel address space.
+pub struct VMap<T: DriverObject> {
+ map: bindings::iosys_map,
+ owner: gem::ObjectRef<Object<T>>,
+}
+
+impl<T: DriverObject> VMap<T> {
+ /// Returns a const raw pointer to the start of the mapping.
+ pub fn as_ptr(&self) -> *const core::ffi::c_void {
+ // SAFETY: The shmem helpers always return non-iomem maps
+ unsafe { self.map.__bindgen_anon_1.vaddr }
+ }
+
+ /// Returns a mutable raw pointer to the start of the mapping.
+ pub fn as_mut_ptr(&mut self) -> *mut core::ffi::c_void {
+ // SAFETY: The shmem helpers always return non-iomem maps
+ unsafe { self.map.__bindgen_anon_1.vaddr }
+ }
+
+ /// Returns a byte slice view of the mapping.
+ pub fn as_slice(&self) -> &[u8] {
+ // SAFETY: The vmap maps valid memory up to the owner size
+ unsafe { slice::from_raw_parts(self.as_ptr() as *const u8, self.owner.size()) }
+ }
+
+    /// Returns a mutable byte slice view of the mapping.
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ // SAFETY: The vmap maps valid memory up to the owner size
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr() as *mut u8, self.owner.size()) }
+ }
+
+ /// Borrows a reference to the object that owns this virtual mapping.
+ pub fn owner(&self) -> &gem::ObjectRef<Object<T>> {
+ &self.owner
+ }
+}
+
+impl<T: DriverObject> Drop for VMap<T> {
+ fn drop(&mut self) {
+ // SAFETY: This function is thread-safe
+ unsafe {
+ bindings::drm_gem_shmem_vunmap(self.owner.mut_shmem(), &mut self.map);
+ }
+ }
+}
+
+// SAFETY: `iosys_map` objects are safe to send across threads.
+unsafe impl<T: DriverObject> Send for VMap<T> {}
+unsafe impl<T: DriverObject> Sync for VMap<T> {}
+
+/// A single scatter-gather entry, representing a span of pages in the device's DMA address space.
+///
+/// For devices not behind a standalone IOMMU, this corresponds to physical addresses.
+#[repr(transparent)]
+pub struct SGEntry(bindings::scatterlist);
+
+impl SGEntry {
+    /// Returns the starting DMA address of this span.
+    pub fn dma_address(&self) -> usize {
+        // SAFETY: By construction, `self.0` is a valid `scatterlist` entry.
+        (unsafe { bindings::sg_dma_address(&self.0) }) as usize
+    }
+
+    /// Returns the length of this span in bytes.
+    pub fn dma_len(&self) -> usize {
+        // SAFETY: By construction, `self.0` is a valid `scatterlist` entry.
+        (unsafe { bindings::sg_dma_len(&self.0) }) as usize
+    }
+}
+
+/// A scatter-gather table of DMA address spans for a GEM shmem object.
+///
+/// # Invariants
+/// `sgt` must be a valid pointer to the `sg_table`, which must correspond to the owned
+/// object in `_owner` (which ensures it remains valid).
+pub struct SGTable<T: DriverObject> {
+ sgt: *const bindings::sg_table,
+ _owner: gem::ObjectRef<Object<T>>,
+}
+
+impl<T: DriverObject> SGTable<T> {
+    /// Returns an iterator through the `SGTable`'s entries.
+    pub fn iter(&'_ self) -> SGTableIter<'_> {
+        // SAFETY: `sgt` is valid per the type invariant.
+        SGTableIter {
+            left: unsafe { (*self.sgt).nents } as usize,
+            sg: unsafe { (*self.sgt).sgl },
+            _p: PhantomData,
+        }
+    }
+}
+
+impl<'a, T: DriverObject> IntoIterator for &'a SGTable<T> {
+ type Item = &'a SGEntry;
+ type IntoIter = SGTableIter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+// SAFETY: `sg_table` objects are safe to send across threads.
+unsafe impl<T: DriverObject> Send for SGTable<T> {}
+unsafe impl<T: DriverObject> Sync for SGTable<T> {}
+
+/// An iterator through `SGTable` entries.
+///
+/// # Invariants
+/// `sg` must be a valid pointer to the scatterlist, which must outlive our lifetime.
+pub struct SGTableIter<'a> {
+ sg: *mut bindings::scatterlist,
+ left: usize,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<'a> Iterator for SGTableIter<'a> {
+ type Item = &'a SGEntry;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.left == 0 {
+            None
+        } else {
+            let sg = self.sg;
+            // SAFETY: `sg` is valid per the type invariant, and at least `left` entries remain.
+            self.sg = unsafe { bindings::sg_next(self.sg) };
+            self.left -= 1;
+            // SAFETY: `SGEntry` is a transparent wrapper around `scatterlist`.
+            Some(unsafe { &(*(sg as *const SGEntry)) })
+        }
+    }
+}
diff --git a/rust/kernel/drm/ioctl.rs b/rust/kernel/drm/ioctl.rs
new file mode 100644
index 000000000000..10304efbd5f1
--- /dev/null
+++ b/rust/kernel/drm/ioctl.rs
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+#![allow(non_snake_case)]
+
+//! DRM IOCTL definitions.
+//!
+//! C header: [`include/drm/drm_ioctl.h`](../../../../include/drm/drm_ioctl.h)
+
+use crate::{bindings, ioctl};
+
+const BASE: u32 = bindings::DRM_IOCTL_BASE as u32;
+
+/// Construct a DRM ioctl number with no argument.
+pub const fn IO(nr: u32) -> u32 {
+ ioctl::_IO(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-only argument.
+pub const fn IOR<T>(nr: u32) -> u32 {
+ ioctl::_IOR::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a write-only argument.
+pub const fn IOW<T>(nr: u32) -> u32 {
+ ioctl::_IOW::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-write argument.
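+///
+/// # Examples
+///
+/// A sketch of defining a driver ioctl number; `drm_foo_get_param` stands in for
+/// a real uAPI struct in `bindings`:
+///
+/// ```
+/// const DRM_IOCTL_FOO_GET_PARAM: u32 =
+///     kernel::drm::ioctl::IOWR::<bindings::drm_foo_get_param>(bindings::DRM_COMMAND_BASE);
+/// ```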
+pub const fn IOWR<T>(nr: u32) -> u32 {
+ ioctl::_IOWR::<T>(BASE, nr)
+}
+
+/// Descriptor type for DRM ioctls. Use the `declare_drm_ioctls!{}` macro to construct them.
+pub type DrmIoctlDescriptor = bindings::drm_ioctl_desc;
+
+/// This is for ioctls which are used for rendering and require that the file descriptor is either
+/// for a render node, or, if it is a legacy/primary node, that it is authenticated.
+pub const AUTH: u32 = bindings::drm_ioctl_flags_DRM_AUTH;
+
+/// This must be set for any ioctl which can change the modeset or display state. Userspace must
+/// call the ioctl through a primary node, while it is the active master.
+///
+/// Note that read-only modeset ioctls can also be called by unauthenticated clients, or when a
+/// master is not the currently active one.
+pub const MASTER: u32 = bindings::drm_ioctl_flags_DRM_MASTER;
+
+/// Anything that could potentially wreck a master file descriptor needs to have this flag set.
+///
+/// Currently that's only the SETMASTER and DROPMASTER ioctls, which e.g. logind can call to force
+/// a misbehaving master (display compositor) into compliance.
+///
+/// This is equivalent to callers with the CAP_SYS_ADMIN capability.
+pub const ROOT_ONLY: u32 = bindings::drm_ioctl_flags_DRM_ROOT_ONLY;
+
+/// Whether drm_ioctl_desc.func should be called with the DRM BKL held or not. Enforced as the
+/// default for all modern drivers, hence there should never be a need to set this flag.
+///
+/// Do not use this anywhere other than for the VBLANK_WAIT IOCTL, which is the only legacy IOCTL
+/// that needs it.
+pub const UNLOCKED: u32 = bindings::drm_ioctl_flags_DRM_UNLOCKED;
+
+/// This is used for all ioctls needed for rendering only, for drivers which support render nodes.
+/// This should cover all new render drivers, and hence it should always be set for any ioctl with
+/// `AUTH` set. Note though that read-only query ioctls might have this set without `AUTH`, because
+/// they do not require authentication.
+pub const RENDER_ALLOW: u32 = bindings::drm_ioctl_flags_DRM_RENDER_ALLOW;
+
+/// Declare the DRM ioctls for a driver.
+///
+/// Each entry in the list should have the form:
+///
+/// `(ioctl_number, argument_type, flags, user_callback),`
+///
+/// `argument_type` is the type name within the `bindings` crate.
+/// `user_callback` should have the following prototype:
+///
+/// ```
+/// fn foo(device: &kernel::drm::device::Device<Self>,
+///        data: &mut bindings::argument_type,
+///        file: &kernel::drm::file::File<Self::File>,
+/// ) -> Result<u32>
+/// ```
+/// where `Self` is the drm::drv::Driver implementation these ioctls are being declared within.
+///
+/// # Examples
+///
+/// ```
+/// kernel::declare_drm_ioctls! {
+/// (FOO_GET_PARAM, drm_foo_get_param, ioctl::RENDER_ALLOW, my_get_param_handler),
+/// }
+/// ```
+///
+#[macro_export]
+macro_rules! declare_drm_ioctls {
+ ( $(($cmd:ident, $struct:ident, $flags:expr, $func:expr)),* $(,)? ) => {
+ const IOCTLS: &'static [$crate::drm::ioctl::DrmIoctlDescriptor] = {
+ const _:() = {
+ let i: u32 = $crate::bindings::DRM_COMMAND_BASE;
+            // Assert that all the IOCTLs are in the right order and there are no gaps,
+            // and that the size of the specified argument type is correct.
+ $(
+ let cmd: u32 = $crate::macros::concat_idents!($crate::bindings::DRM_IOCTL_, $cmd);
+ ::core::assert!(i == $crate::ioctl::_IOC_NR(cmd));
+ ::core::assert!(core::mem::size_of::<$crate::bindings::$struct>() == $crate::ioctl::_IOC_SIZE(cmd));
+ let i: u32 = i + 1;
+ )*
+ };
+
+ let ioctls = &[$(
+ $crate::bindings::drm_ioctl_desc {
+ cmd: $crate::macros::concat_idents!($crate::bindings::DRM_IOCTL_, $cmd) as u32,
+ func: {
+ #[allow(non_snake_case)]
+ unsafe extern "C" fn $cmd(
+ raw_dev: *mut $crate::bindings::drm_device,
+ raw_data: *mut ::core::ffi::c_void,
+ raw_file_priv: *mut $crate::bindings::drm_file,
+ ) -> core::ffi::c_int {
+ // SAFETY: We never drop this, and the DRM core ensures the device lives
+ // while callbacks are being called.
+ //
+ // FIXME: Currently there is nothing enforcing that the types of the
+ // dev/file match the current driver these ioctls are being declared
+ // for, and it's not clear how to enforce this within the type system.
+ let dev = ::core::mem::ManuallyDrop::new(unsafe {
+ $crate::drm::device::Device::from_raw(raw_dev)
+ });
+ // SAFETY: This is just the ioctl argument, which hopefully has the right type
+ // (we've done our best checking the size).
+ let data = unsafe { &mut *(raw_data as *mut $crate::bindings::$struct) };
+ // SAFETY: This is just the DRM file structure
+ let file = unsafe { $crate::drm::file::File::from_raw(raw_file_priv) };
+
+ match $func(&*dev, data, &file) {
+ Err(e) => e.to_kernel_errno(),
+                                Ok(i) => i.try_into()
+                                    .unwrap_or($crate::error::code::ERANGE.to_kernel_errno()),
+ }
+ }
+ Some($cmd)
+ },
+ flags: $flags,
+ name: $crate::c_str!(::core::stringify!($cmd)).as_char_ptr(),
+ }
+ ),*];
+ ioctls
+ };
+ };
+}
diff --git a/rust/kernel/drm/mm.rs b/rust/kernel/drm/mm.rs
new file mode 100644
index 000000000000..83e27a7dcc7e
--- /dev/null
+++ b/rust/kernel/drm/mm.rs
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM MM range allocator
+//!
+//! C header: [`include/drm/drm_mm.h`](../../../../include/drm/drm_mm.h)
+
+use crate::{
+ bindings,
+ error::{to_result, Result},
+ str::CStr,
+ sync::{Arc, LockClassKey, LockIniter, Mutex, UniqueArc},
+ types::Opaque,
+};
+
+use alloc::boxed::Box;
+
+use core::{
+ marker::{PhantomData, PhantomPinned},
+ ops::Deref,
+ pin::Pin,
+};
+
+/// Type alias representing a DRM MM node.
+pub type Node<A, T> = Pin<Box<NodeData<A, T>>>;
+
+/// Trait which must be implemented by the inner allocator state type provided by the user.
+pub trait AllocInner<T> {
+ /// Notification that a node was dropped from the allocator.
+ fn drop_object(&mut self, _start: u64, _size: u64, _color: usize, _object: &mut T) {}
+}
+
+impl<T> AllocInner<T> for () {}
+
+/// Wrapper type for a `struct drm_mm` plus user AllocInner object.
+///
+/// # Invariants
+/// The `drm_mm` struct is valid and initialized.
+struct MmInner<A: AllocInner<T>, T>(Opaque<bindings::drm_mm>, A, PhantomData<T>);
+
+/// Represents a single allocated node in the MM allocator
+pub struct NodeData<A: AllocInner<T>, T> {
+ node: bindings::drm_mm_node,
+ mm: Arc<Mutex<MmInner<A, T>>>,
+ valid: bool,
+ /// A drm_mm_node needs to be pinned because nodes reference each other in a linked list.
+ _pin: PhantomPinned,
+ inner: T,
+}
+
+// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
+unsafe impl<A: Send + AllocInner<T>, T: Send> Send for NodeData<A, T> {}
+unsafe impl<A: Send + AllocInner<T>, T: Sync> Sync for NodeData<A, T> {}
+
+/// Available MM node insertion modes
+#[repr(u32)]
+pub enum InsertMode {
+ /// Search for the smallest hole (within the search range) that fits the desired node.
+ ///
+ /// Allocates the node from the bottom of the found hole.
+ Best = bindings::drm_mm_insert_mode_DRM_MM_INSERT_BEST,
+
+ /// Search for the lowest hole (address closest to 0, within the search range) that fits the
+ /// desired node.
+ ///
+ /// Allocates the node from the bottom of the found hole.
+ Low = bindings::drm_mm_insert_mode_DRM_MM_INSERT_LOW,
+
+ /// Search for the highest hole (address closest to U64_MAX, within the search range) that fits
+ /// the desired node.
+ ///
+ /// Allocates the node from the top of the found hole. The specified alignment for the node is
+ /// applied to the base of the node (`Node.start()`).
+ High = bindings::drm_mm_insert_mode_DRM_MM_INSERT_HIGH,
+
+ /// Search for the most recently evicted hole (within the search range) that fits the desired
+ /// node. This is appropriate for use immediately after performing an eviction scan and removing
+ /// the selected nodes to form a hole.
+ ///
+ /// Allocates the node from the bottom of the found hole.
+ Evict = bindings::drm_mm_insert_mode_DRM_MM_INSERT_EVICT,
+}
+
+/// A clonable, interlocked reference to the allocator state.
+///
+/// This is useful to perform actions on the user-supplied `AllocInner<T>` type given just a Node,
+/// without immediately taking the lock.
+#[derive(Clone)]
+pub struct InnerRef<A: AllocInner<T>, T>(Arc<Mutex<MmInner<A, T>>>);
+
+impl<A: AllocInner<T>, T> InnerRef<A, T> {
+ /// Operate on the user `AllocInner<T>` implementation, taking the lock.
+ pub fn with<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+ let mut l = self.0.lock();
+ cb(&mut l.1)
+ }
+}
+
+impl<A: AllocInner<T>, T> NodeData<A, T> {
+ /// Returns the color of the node (an opaque value)
+ pub fn color(&self) -> usize {
+ self.node.color as usize
+ }
+
+ /// Returns the start address of the node
+ pub fn start(&self) -> u64 {
+ self.node.start
+ }
+
+ /// Returns the size of the node in bytes
+ pub fn size(&self) -> u64 {
+ self.node.size
+ }
+
+ /// Operate on the user `AllocInner<T>` implementation associated with this node's allocator.
+ pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+ let mut l = self.mm.lock();
+ cb(&mut l.1)
+ }
+
+ /// Return a clonable, detached reference to the allocator inner data.
+ pub fn alloc_ref(&self) -> InnerRef<A, T> {
+ InnerRef(self.mm.clone())
+ }
+
+ /// Return a mutable reference to the inner data.
+ pub fn inner_mut(self: Pin<&mut Self>) -> &mut T {
+ // SAFETY: This is okay because inner is not structural
+ unsafe { &mut self.get_unchecked_mut().inner }
+ }
+}
+
+impl<A: AllocInner<T>, T> Deref for NodeData<A, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<A: AllocInner<T>, T> Drop for NodeData<A, T> {
+ fn drop(&mut self) {
+ if self.valid {
+ let mut guard = self.mm.lock();
+
+ // Inform the user allocator that a node is being dropped.
+ guard
+ .1
+ .drop_object(self.start(), self.size(), self.color(), &mut self.inner);
+ // SAFETY: The MM lock is still taken, so we can safely remove the node.
+ unsafe { bindings::drm_mm_remove_node(&mut self.node) };
+ }
+ }
+}
+
+/// An instance of a DRM MM range allocator.
+pub struct Allocator<A: AllocInner<T>, T> {
+ mm: Arc<Mutex<MmInner<A, T>>>,
+ _p: PhantomData<T>,
+}
+
+impl<A: AllocInner<T>, T> Allocator<A, T> {
+ /// Create a new range allocator for the given start and size range of addresses.
+ ///
+ /// The user may optionally provide an inner object representing allocator state, which will
+ /// be protected by the same lock. If not required, `()` can be used.
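+    ///
+    /// # Examples
+    ///
+    /// A sketch of creating an allocator over a 4 GiB range and making one
+    /// allocation; the node payload here is just `()`:
+    ///
+    /// ```
+    /// static MM_CLASS: LockClassKey = LockClassKey::new();
+    ///
+    /// let mut alloc: Allocator<(), ()> =
+    ///     Allocator::new(0, 1 << 32, (), kernel::c_str!("my_mm"), &MM_CLASS)?;
+    /// let node = alloc.insert_node((), 0x1000)?;
+    /// assert_eq!(node.size(), 0x1000);
+    /// ```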
+ pub fn new(
+ start: u64,
+ size: u64,
+ inner: A,
+ name: &'static CStr,
+ lock_key: &'static LockClassKey,
+ ) -> Result<Allocator<A, T>> {
+ // SAFETY: We call `Mutex::init_lock` below.
+ let mut mm: Pin<UniqueArc<Mutex<MmInner<A, T>>>> = UniqueArc::try_new(unsafe {
+ Mutex::new(MmInner(Opaque::uninit(), inner, PhantomData))
+ })?
+ .into();
+
+ mm.as_mut().init_lock(name, lock_key);
+
+ unsafe {
+ // SAFETY: The Opaque instance provides a valid pointer, and it is initialized after
+ // this call.
+ bindings::drm_mm_init(mm.lock().0.get(), start, size);
+ }
+
+ Ok(Allocator {
+ mm: mm.into(),
+ _p: PhantomData,
+ })
+ }
+
+ /// Insert a new node into the allocator of a given size.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ pub fn insert_node(&mut self, node: T, size: u64) -> Result<Node<A, T>> {
+ self.insert_node_generic(node, size, 0, 0, InsertMode::Best)
+ }
+
+ /// Insert a new node into the allocator of a given size, with configurable alignment,
+ /// color, and insertion mode.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ pub fn insert_node_generic(
+ &mut self,
+ node: T,
+ size: u64,
+ alignment: u64,
+ color: usize,
+ mode: InsertMode,
+ ) -> Result<Node<A, T>> {
+ self.insert_node_in_range(node, size, alignment, color, 0, u64::MAX, mode)
+ }
+
+ /// Insert a new node into the allocator of a given size, with configurable alignment,
+ /// color, insertion mode, and sub-range to allocate from.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ #[allow(clippy::too_many_arguments)]
+ pub fn insert_node_in_range(
+ &mut self,
+ node: T,
+ size: u64,
+ alignment: u64,
+ color: usize,
+ start: u64,
+ end: u64,
+ mode: InsertMode,
+ ) -> Result<Node<A, T>> {
+ let mut mm_node = Box::try_new(NodeData {
+ // SAFETY: This C struct should be zero-initialized.
+ node: unsafe { core::mem::zeroed() },
+ valid: false,
+ inner: node,
+ mm: self.mm.clone(),
+ _pin: PhantomPinned,
+ })?;
+
+ let guard = self.mm.lock();
+ // SAFETY: We hold the lock and all pointers are valid.
+ to_result(unsafe {
+ bindings::drm_mm_insert_node_in_range(
+ guard.0.get(),
+ &mut mm_node.node,
+ size,
+ alignment,
+ color as core::ffi::c_ulong,
+ start,
+ end,
+ mode as u32,
+ )
+ })?;
+
+ mm_node.valid = true;
+
+ Ok(Pin::from(mm_node))
+ }
+
+ /// Insert a node into the allocator at a fixed start address.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ pub fn reserve_node(
+ &mut self,
+ node: T,
+ start: u64,
+ size: u64,
+ color: usize,
+ ) -> Result<Node<A, T>> {
+ let mut mm_node = Box::try_new(NodeData {
+ // SAFETY: This C struct should be zero-initialized.
+ node: unsafe { core::mem::zeroed() },
+ valid: false,
+ inner: node,
+ mm: self.mm.clone(),
+ _pin: PhantomPinned,
+ })?;
+
+ mm_node.node.start = start;
+ mm_node.node.size = size;
+ mm_node.node.color = color as core::ffi::c_ulong;
+
+ let guard = self.mm.lock();
+ // SAFETY: We hold the lock and all pointers are valid.
+ to_result(unsafe { bindings::drm_mm_reserve_node(guard.0.get(), &mut mm_node.node) })?;
+
+ mm_node.valid = true;
+
+ Ok(Pin::from(mm_node))
+ }
+
+ /// Operate on the inner user type `A`, taking the allocator lock
+ pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+ let mut guard = self.mm.lock();
+ cb(&mut guard.1)
+ }
+}
+
+impl<A: AllocInner<T>, T> Drop for MmInner<A, T> {
+ fn drop(&mut self) {
+ // SAFETY: If the MmInner is dropped then all nodes are gone (since they hold references),
+ // so it is safe to tear down the allocator.
+ unsafe {
+ bindings::drm_mm_takedown(self.0.get());
+ }
+ }
+}
+
+// SAFETY: `MmInner` is safe to send across threads as long as the `AllocInner` user type is
+// `Send`.
+unsafe impl<A: Send + AllocInner<T>, T> Send for MmInner<A, T> {}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
new file mode 100644
index 000000000000..b1f182453ec1
--- /dev/null
+++ b/rust/kernel/drm/mod.rs
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM subsystem abstractions.
+
+pub mod device;
+pub mod drv;
+pub mod file;
+pub mod gem;
+pub mod ioctl;
+pub mod mm;
+pub mod sched;
+pub mod syncobj;
diff --git a/rust/kernel/drm/sched.rs b/rust/kernel/drm/sched.rs
new file mode 100644
index 000000000000..a5275cc16179
--- /dev/null
+++ b/rust/kernel/drm/sched.rs
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM Scheduler
+//!
+//! C header: [`include/drm/gpu_scheduler.h`](../../../../include/drm/gpu_scheduler.h)
+
+use crate::{
+ bindings, device,
+ dma_fence::*,
+ error::{to_result, Result},
+ prelude::*,
+ sync::{Arc, UniqueArc},
+};
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use core::ptr::addr_of_mut;
+
+/// Scheduler status after timeout recovery
+#[repr(u32)]
+pub enum Status {
+ /// Device recovered from the timeout and can execute jobs again
+ Nominal = bindings::drm_gpu_sched_stat_DRM_GPU_SCHED_STAT_NOMINAL,
+ /// Device is no longer available
+ NoDevice = bindings::drm_gpu_sched_stat_DRM_GPU_SCHED_STAT_ENODEV,
+}
+
+/// Scheduler priorities
+#[repr(i32)]
+pub enum Priority {
+ /// Low userspace priority
+ Min = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_MIN,
+ /// Normal userspace priority
+ Normal = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_NORMAL,
+ /// High userspace priority
+ High = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_HIGH,
+ /// Kernel priority (highest)
+ Kernel = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_KERNEL,
+}
+
+/// Trait to be implemented by driver job objects.
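+///
+/// A skeletal implementation for a hypothetical driver; `MyJob` and its field are
+/// illustrative, not part of this patch:
+///
+/// ```ignore
+/// struct MyJob {
+///     cmd_seq: u64,
+/// }
+///
+/// impl JobImpl for MyJob {
+///     fn run(job: &mut Job<Self>) -> Result<Option<Fence>> {
+///         // Submit job.cmd_seq to the hardware here, returning the hardware
+///         // completion fence (or None if the job completed synchronously).
+///         Ok(None)
+///     }
+///
+///     fn timed_out(_job: &mut Job<Self>) -> Status {
+///         // Trigger device recovery here.
+///         Status::Nominal
+///     }
+/// }
+/// ```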
+pub trait JobImpl: Sized {
+ /// Called when the scheduler is considering scheduling this job next, to get another `Fence`
+ /// for this job to block on. Once it returns `None`, `run()` may be called.
+ fn prepare(_job: &mut Job<Self>) -> Option<Fence> {
+ None // Equivalent to NULL function pointer
+ }
+
+ /// Called before job execution to check whether the hardware is free enough to run the job.
+ /// This can be used to implement more complex hardware resource policies than the
+ /// `hw_submission` limit.
+ fn can_run(_job: &mut Job<Self>) -> bool {
+ true
+ }
+
+ /// Called to execute the job once all of its dependencies have been resolved. This may be
+ /// called multiple times if `timed_out()` has happened and `drm_sched_job_recovery()`
+ /// decides to try it again.
+ fn run(job: &mut Job<Self>) -> Result<Option<Fence>>;
+
+ /// Called when a job has taken too long to execute, to trigger GPU recovery.
+ ///
+ /// This method is called in a workqueue context.
+ fn timed_out(job: &mut Job<Self>) -> Status;
+}
+
+unsafe extern "C" fn prepare_job_cb<T: JobImpl>(
+ sched_job: *mut bindings::drm_sched_job,
+ _s_entity: *mut bindings::drm_sched_entity,
+) -> *mut bindings::dma_fence {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ match T::prepare(unsafe { &mut *p }) {
+ None => core::ptr::null_mut(),
+ Some(fence) => fence.into_raw(),
+ }
+}
+
+unsafe extern "C" fn run_job_cb<T: JobImpl>(
+ sched_job: *mut bindings::drm_sched_job,
+) -> *mut bindings::dma_fence {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ match T::run(unsafe { &mut *p }) {
+ Err(e) => e.to_ptr(),
+ Ok(None) => core::ptr::null_mut(),
+ Ok(Some(fence)) => fence.into_raw(),
+ }
+}
+
+unsafe extern "C" fn can_run_job_cb<T: JobImpl>(sched_job: *mut bindings::drm_sched_job) -> bool {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ T::can_run(unsafe { &mut *p })
+}
+
+unsafe extern "C" fn timedout_job_cb<T: JobImpl>(
+ sched_job: *mut bindings::drm_sched_job,
+) -> bindings::drm_gpu_sched_stat {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ T::timed_out(unsafe { &mut *p }) as bindings::drm_gpu_sched_stat
+}
+
+unsafe extern "C" fn free_job_cb<T: JobImpl>(sched_job: *mut bindings::drm_sched_job) {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ // SAFETY: All of our Job<T>s are created inside a Box, so it is safe to convert
+ // the raw pointer back into a Box and drop it here.
+ drop(unsafe { Box::from_raw(p) });
+}
+
+/// A DRM scheduler job.
+pub struct Job<T: JobImpl> {
+ job: bindings::drm_sched_job,
+ inner: T,
+}
+
+impl<T: JobImpl> Deref for Job<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T: JobImpl> DerefMut for Job<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+impl<T: JobImpl> Drop for Job<T> {
+ fn drop(&mut self) {
+ // SAFETY: At this point the job has either been submitted and this is being called from
+ // `free_job_cb` above, or it hasn't and it is safe to call `drm_sched_job_cleanup`.
+ unsafe { bindings::drm_sched_job_cleanup(&mut self.job) };
+ }
+}
+
+/// A pending DRM scheduler job (not yet armed)
+pub struct PendingJob<'a, T: JobImpl>(Box<Job<T>>, PhantomData<&'a T>);
+
+impl<'a, T: JobImpl> PendingJob<'a, T> {
+ /// Add a fence as a dependency to the job
+ pub fn add_dependency(&mut self, fence: Fence) -> Result {
+ // SAFETY: The job is valid and initialized, and ownership of our fence
+ // reference is transferred to the C API, which consumes it.
+ to_result(unsafe {
+ bindings::drm_sched_job_add_dependency(&mut self.0.job, fence.into_raw())
+ })
+ }
+
+ /// Arm the job to make it ready for execution
+ pub fn arm(mut self) -> ArmedJob<'a, T> {
+ // SAFETY: The job was initialized by `new_job()`, so it is safe to arm it.
+ unsafe { bindings::drm_sched_job_arm(&mut self.0.job) };
+ ArmedJob(self.0, PhantomData)
+ }
+}
+
+impl<'a, T: JobImpl> Deref for PendingJob<'a, T> {
+ type Target = Job<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<'a, T: JobImpl> DerefMut for PendingJob<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+/// An armed DRM scheduler job (not yet submitted)
+pub struct ArmedJob<'a, T: JobImpl>(Box<Job<T>>, PhantomData<&'a T>);
+
+impl<'a, T: JobImpl> ArmedJob<'a, T> {
+ /// Returns the fences associated with this job.
+ pub fn fences(&self) -> JobFences<'_> {
+ // SAFETY: The job has been armed, so `s_fence` points to a valid drm_sched_fence.
+ JobFences(unsafe { &mut *self.0.job.s_fence })
+ }
+
+ /// Push the job for execution into the scheduler
+ pub fn push(self) {
+ // Relinquish ownership of the Box; after the push below, the job is owned by
+ // the scheduler and is freed via free_job_cb().
+ let ArmedJob(job, _) = self;
+ let ptr = Box::<Job<T>>::into_raw(job);
+
+ // SAFETY: We are passing in ownership of a valid Box raw pointer.
+ unsafe { bindings::drm_sched_entity_push_job(addr_of_mut!((*ptr).job)) };
+ }
+}
+
+impl<'a, T: JobImpl> Deref for ArmedJob<'a, T> {
+ type Target = Job<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<'a, T: JobImpl> DerefMut for ArmedJob<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+/// Reference to the bundle of fences attached to a DRM scheduler job
+pub struct JobFences<'a>(&'a mut bindings::drm_sched_fence);
+
+impl<'a> JobFences<'a> {
+ /// Returns a new reference to the job's scheduled fence.
+ pub fn scheduled(&mut self) -> Fence {
+ // SAFETY: The drm_sched_fence is valid per the JobFences invariant, and
+ // `get_raw()` takes a new reference to it.
+ unsafe { Fence::get_raw(&mut self.0.scheduled) }
+ }
+
+ /// Returns a new reference to the job's finished fence.
+ pub fn finished(&mut self) -> Fence {
+ // SAFETY: The drm_sched_fence is valid per the JobFences invariant, and
+ // `get_raw()` takes a new reference to it.
+ unsafe { Fence::get_raw(&mut self.0.finished) }
+ }
+}
+
+struct EntityInner<T: JobImpl> {
+ entity: bindings::drm_sched_entity,
+ // TODO: Allow users to share guilty flag between entities
+ sched: Arc<SchedulerInner<T>>,
+ guilty: bindings::atomic_t,
+ _p: PhantomData<T>,
+}
+
+impl<T: JobImpl> Drop for EntityInner<T> {
+ fn drop(&mut self) {
+ // SAFETY: The EntityInner is initialized. This will cancel/free all jobs.
+ unsafe { bindings::drm_sched_entity_destroy(&mut self.entity) };
+ }
+}
+
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Sync for EntityInner<T> {}
+unsafe impl<T: JobImpl> Send for EntityInner<T> {}
+
+/// A DRM scheduler entity.
+pub struct Entity<T: JobImpl>(Pin<Box<EntityInner<T>>>);
+
+impl<T: JobImpl> Entity<T> {
+ /// Create a new scheduler entity.
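+ ///
+ /// A minimal sketch; `sched` is a `Scheduler<MyJob>` created elsewhere
+ /// (illustrative):
+ ///
+ /// ```ignore
+ /// let entity = Entity::new(&sched, Priority::Normal)?;
+ /// ```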
+ pub fn new(sched: &Scheduler<T>, priority: Priority) -> Result<Self> {
+ let mut entity: Box<MaybeUninit<EntityInner<T>>> = Box::try_new_zeroed()?;
+
+ // drm_sched_entity_init() expects an array of scheduler pointers, so pass a
+ // pointer to our single scheduler as a one-element list.
+ let mut sched_ptr = &sched.0.sched as *const _ as *mut _;
+
+ // SAFETY: The Box is allocated above and valid.
+ unsafe {
+ bindings::drm_sched_entity_init(
+ addr_of_mut!((*entity.as_mut_ptr()).entity),
+ priority as _,
+ &mut sched_ptr,
+ 1,
+ addr_of_mut!((*entity.as_mut_ptr()).guilty),
+ )
+ };
+
+ // SAFETY: The Box is allocated above and valid.
+ unsafe { addr_of_mut!((*entity.as_mut_ptr()).sched).write(sched.0.clone()) };
+
+ // SAFETY: entity is now initialized.
+ Ok(Self(Pin::from(unsafe { entity.assume_init() })))
+ }
+
+ /// Create a new job on this entity.
+ ///
+ /// The entity must outlive the pending job until it transitions into the submitted state,
+ /// after which the scheduler owns it.
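+ ///
+ /// A sketch of the full job lifecycle; `MyJob` and `in_fence` are illustrative:
+ ///
+ /// ```ignore
+ /// let mut job = entity.new_job(MyJob { cmd_seq: 1 })?;
+ /// job.add_dependency(in_fence)?;              // block on this fence first
+ /// let armed = job.arm();                      // allocate seqnos, ready to run
+ /// let done_fence = armed.fences().finished(); // signals on completion
+ /// armed.push();                               // ownership moves to the scheduler
+ /// ```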
+ pub fn new_job(&self, inner: T) -> Result<PendingJob<'_, T>> {
+ let mut job: Box<MaybeUninit<Job<T>>> = Box::try_new_zeroed()?;
+
+ // SAFETY: We hold a reference to the entity (which is a valid pointer),
+ // and the job object was just allocated above.
+ to_result(unsafe {
+ bindings::drm_sched_job_init(
+ addr_of_mut!((*job.as_mut_ptr()).job),
+ &self.0.as_ref().get_ref().entity as *const _ as *mut _,
+ core::ptr::null_mut(),
+ )
+ })?;
+
+ // SAFETY: The Box pointer is valid, and this initializes the inner member.
+ unsafe { addr_of_mut!((*job.as_mut_ptr()).inner).write(inner) };
+
+ // SAFETY: All fields of the Job<T> are now initialized.
+ Ok(PendingJob(unsafe { job.assume_init() }, PhantomData))
+ }
+}
+
+/// DRM scheduler inner data
+pub struct SchedulerInner<T: JobImpl> {
+ sched: bindings::drm_gpu_scheduler,
+ _p: PhantomData<T>,
+}
+
+impl<T: JobImpl> Drop for SchedulerInner<T> {
+ fn drop(&mut self) {
+ // SAFETY: The scheduler is valid. This assumes drm_sched_fini() will take care of
+ // freeing all in-progress jobs.
+ unsafe { bindings::drm_sched_fini(&mut self.sched) };
+ }
+}
+
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Sync for SchedulerInner<T> {}
+unsafe impl<T: JobImpl> Send for SchedulerInner<T> {}
+
+/// A DRM Scheduler
+pub struct Scheduler<T: JobImpl>(Arc<SchedulerInner<T>>);
+
+impl<T: JobImpl> Scheduler<T> {
+ const OPS: bindings::drm_sched_backend_ops = bindings::drm_sched_backend_ops {
+ prepare_job: Some(prepare_job_cb::<T>),
+ can_run_job: Some(can_run_job_cb::<T>),
+ run_job: Some(run_job_cb::<T>),
+ timedout_job: Some(timedout_job_cb::<T>),
+ free_job: Some(free_job_cb::<T>),
+ };
+
+ /// Creates a new DRM Scheduler object.
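+ ///
+ /// A minimal sketch; the device, queue depth, and timeout values are
+ /// illustrative:
+ ///
+ /// ```ignore
+ /// let sched = Scheduler::<MyJob>::new(
+ ///     dev,                // &impl device::RawDevice
+ ///     64,                 // hw_submission: max jobs in flight
+ ///     0,                  // hang_limit
+ ///     500,                // timeout_ms
+ ///     c_str!("my-sched"), // scheduler name
+ /// )?;
+ /// ```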
+ // TODO: Shared timeout workqueues & scores
+ pub fn new(
+ device: &impl device::RawDevice,
+ hw_submission: u32,
+ hang_limit: u32,
+ timeout_ms: usize,
+ name: &'static CStr,
+ ) -> Result<Scheduler<T>> {
+ let mut sched: UniqueArc<MaybeUninit<SchedulerInner<T>>> = UniqueArc::try_new_uninit()?;
+
+ // SAFETY: The drm_sched pointer is valid and pinned as it was just allocated above.
+ to_result(unsafe {
+ bindings::drm_sched_init(
+ addr_of_mut!((*sched.as_mut_ptr()).sched),
+ &Self::OPS,
+ hw_submission,
+ hang_limit,
+ bindings::msecs_to_jiffies(timeout_ms.try_into()?).try_into()?,
+ core::ptr::null_mut(),
+ core::ptr::null_mut(),
+ name.as_char_ptr(),
+ device.raw_device(),
+ )
+ })?;
+
+ // SAFETY: All fields of SchedulerInner are now initialized.
+ Ok(Scheduler(unsafe { sched.assume_init() }.into()))
+ }
+}
diff --git a/rust/kernel/drm/syncobj.rs b/rust/kernel/drm/syncobj.rs
new file mode 100644
index 000000000000..10eed05eb27a
--- /dev/null
+++ b/rust/kernel/drm/syncobj.rs
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM Sync Objects
+//!
+//! C header: [`include/linux/drm/drm_syncobj.h`](../../../../include/linux/drm/drm_syncobj.h)
+
+use crate::{bindings, dma_fence::*, drm, error::Result, prelude::*};
+
+/// A DRM Sync Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a drm_syncobj and we own a reference to it.
+pub struct SyncObj {
+ ptr: *mut bindings::drm_syncobj,
+}
+
+impl SyncObj {
+ /// Looks up a sync object by its handle for a given `File`.
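+ ///
+ /// A minimal sketch, e.g. from an ioctl handler where `file` and the handle
+ /// come from the ioctl arguments (illustrative):
+ ///
+ /// ```ignore
+ /// let syncobj = SyncObj::lookup_handle(file, args.in_sync)?;
+ /// ```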
+ pub fn lookup_handle(file: &impl drm::file::GenericFile, handle: u32) -> Result<SyncObj> {
+ // SAFETY: The arguments are all valid per the type invariants.
+ let ptr = unsafe { bindings::drm_syncobj_find(file.raw() as *mut _, handle) };
+
+ if ptr.is_null() {
+ Err(ENOENT)
+ } else {
+ Ok(SyncObj { ptr })
+ }
+ }
+
+ /// Returns the DMA fence associated with this sync object, if any.
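+ ///
+ /// A small sketch, adding the current fence (if any) as a job dependency;
+ /// `job` is illustrative:
+ ///
+ /// ```ignore
+ /// if let Some(fence) = syncobj.fence_get() {
+ ///     job.add_dependency(fence)?;
+ /// }
+ /// ```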
+ pub fn fence_get(&self) -> Option<Fence> {
+ // SAFETY: `self.ptr` is valid per the type invariant.
+ let fence = unsafe { bindings::drm_syncobj_fence_get(self.ptr) };
+ if fence.is_null() {
+ None
+ } else {
+ // SAFETY: The pointer is non-NULL and drm_syncobj_fence_get acquired an
+ // additional reference.
+ Some(unsafe { Fence::from_raw(fence) })
+ }
+ }
+
+ /// Replaces the DMA fence with a new one, or removes it if `fence` is `None`.
+ pub fn replace_fence(&self, fence: Option<&Fence>) {
+ // SAFETY: All pointers are valid per the type invariants, and the C API
+ // accepts a NULL fence to remove the current one.
+ unsafe {
+ bindings::drm_syncobj_replace_fence(
+ self.ptr,
+ fence.map_or(core::ptr::null_mut(), |a| a.raw()),
+ )
+ };
+ }
+
+ /// Adds a new timeline point to the syncobj.
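+ ///
+ /// A sketch of signaling a timeline point; the `FenceChain` allocation and the
+ /// argument names are illustrative:
+ ///
+ /// ```ignore
+ /// let chain = FenceChain::new()?;
+ /// syncobj.add_point(chain, &done_fence, args.timeline_point);
+ /// ```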
+ pub fn add_point(&self, chain: FenceChain, fence: &Fence, point: u64) {
+ // SAFETY: All arguments should be valid per the respective type invariants.
+ // This takes over the FenceChain ownership.
+ unsafe { bindings::drm_syncobj_add_point(self.ptr, chain.into_raw(), fence.raw(), point) };
+ }
+}
+
+impl Drop for SyncObj {
+ fn drop(&mut self) {
+ // SAFETY: We own a reference to this syncobj.
+ unsafe { bindings::drm_syncobj_put(self.ptr) };
+ }
+}
+
+impl Clone for SyncObj {
+ fn clone(&self) -> Self {
+ // SAFETY: `ptr` is valid per the type invariant and we own a reference to it.
+ unsafe { bindings::drm_syncobj_get(self.ptr) };
+ SyncObj { ptr: self.ptr }
+ }
+}
+
+// SAFETY: drm_syncobj operations are internally locked.
+unsafe impl Sync for SyncObj {}
+unsafe impl Send for SyncObj {}