Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile                      |   18
-rw-r--r--  rust/alloc/alloc.rs                |   26
-rw-r--r--  rust/alloc/borrow.rs               |  498
-rw-r--r--  rust/alloc/boxed.rs                |  435
-rw-r--r--  rust/alloc/collections/mod.rs      |    3
-rw-r--r--  rust/alloc/lib.rs                  |   44
-rw-r--r--  rust/alloc/raw_vec.rs              |   14
-rw-r--r--  rust/alloc/slice.rs                |  100
-rw-r--r--  rust/alloc/vec/drain.rs            |   75
-rw-r--r--  rust/alloc/vec/drain_filter.rs     |   60
-rw-r--r--  rust/alloc/vec/into_iter.rs        |   78
-rw-r--r--  rust/alloc/vec/is_zero.rs          |   73
-rw-r--r--  rust/alloc/vec/mod.rs              |  491
-rw-r--r--  rust/alloc/vec/set_len_on_drop.rs  |   28
-rw-r--r--  rust/alloc/vec/spec_extend.rs      |  170
-rw-r--r--  rust/bindings/bindings_helper.h    |   51
-rw-r--r--  rust/bindings/lib.rs               |    2
-rw-r--r--  rust/compiler_builtins.rs          |    5
-rw-r--r--  rust/helpers.c                     |  515
-rw-r--r--  rust/kernel/build_assert.rs        |    1
-rw-r--r--  rust/kernel/delay.rs               |  104
-rw-r--r--  rust/kernel/device.rs              |  536
-rw-r--r--  rust/kernel/dma_fence.rs           |  532
-rw-r--r--  rust/kernel/driver.rs              |  475
-rw-r--r--  rust/kernel/drm/device.rs          |   76
-rw-r--r--  rust/kernel/drm/drv.rs             |  342
-rw-r--r--  rust/kernel/drm/file.rs            |  113
-rw-r--r--  rust/kernel/drm/gem/mod.rs         |  387
-rw-r--r--  rust/kernel/drm/gem/shmem.rs       |  385
-rw-r--r--  rust/kernel/drm/ioctl.rs           |  147
-rw-r--r--  rust/kernel/drm/mm.rs              |  309
-rw-r--r--  rust/kernel/drm/mod.rs             |   12
-rw-r--r--  rust/kernel/drm/sched.rs           |  358
-rw-r--r--  rust/kernel/drm/syncobj.rs         |   77
-rw-r--r--  rust/kernel/error.rs               |  281
-rw-r--r--  rust/kernel/io_buffer.rs           |  153
-rw-r--r--  rust/kernel/io_mem.rs              |  292
-rw-r--r--  rust/kernel/io_pgtable.rs          |  353
-rw-r--r--  rust/kernel/ioctl.rs               |   64
-rw-r--r--  rust/kernel/lib.rs                 |  144
-rw-r--r--  rust/kernel/module_param.rs        |  501
-rw-r--r--  rust/kernel/of.rs                  |  546
-rw-r--r--  rust/kernel/platform.rs            |  286
-rw-r--r--  rust/kernel/prelude.rs             |   10
-rw-r--r--  rust/kernel/revocable.rs           |  425
-rw-r--r--  rust/kernel/soc/apple/mod.rs       |    6
-rw-r--r--  rust/kernel/soc/apple/rtkit.rs     |  277
-rw-r--r--  rust/kernel/soc/mod.rs             |    5
-rw-r--r--  rust/kernel/std_vendor.rs          |    1
-rw-r--r--  rust/kernel/sync.rs                |   75
-rw-r--r--  rust/kernel/sync/arc.rs            |  577
-rw-r--r--  rust/kernel/sync/condvar.rs        |  142
-rw-r--r--  rust/kernel/sync/guard.rs          |  162
-rw-r--r--  rust/kernel/sync/mutex.rs          |  149
-rw-r--r--  rust/kernel/sync/rcu.rs            |   52
-rw-r--r--  rust/kernel/sync/revocable.rs      |  246
-rw-r--r--  rust/kernel/sync/smutex.rs         |  290
-rw-r--r--  rust/kernel/time.rs                |   23
-rw-r--r--  rust/kernel/types.rs               |  296
-rw-r--r--  rust/kernel/user_ptr.rs            |  175
-rw-r--r--  rust/kernel/xarray.rs              |  299
-rw-r--r--  rust/macros/concat_idents.rs       |   24
-rw-r--r--  rust/macros/helpers.rs             |   34
-rw-r--r--  rust/macros/lib.rs                 |    7
-rw-r--r--  rust/macros/module.rs              |  380
-rw-r--r--  rust/macros/versions.rs            |  289
66 files changed, 12333 insertions(+), 771 deletions(-)
diff --git a/rust/Makefile b/rust/Makefile
index ff70c4c916f8..aee0701acf02 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -50,6 +50,7 @@ core-cfgs = \
--cfg no_fp_fmt_parse
alloc-cfgs = \
+ --cfg no_borrow \
--cfg no_fmt \
--cfg no_global_oom_handling \
--cfg no_macros \
@@ -262,6 +263,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_x86 := x86_64-linux-gnu
+BINDGEN_TARGET_arm64 := aarch64-linux-gnu
BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
# All warnings are inhibited since GCC builds are very experimental,
@@ -359,9 +361,23 @@ rust-analyzer:
$(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \
$(RUST_LIB_SRC) > $(objtree)/rust-project.json
+redirect-intrinsics = \
+ __eqsf2 __gesf2 __lesf2 __nesf2 __unordsf2 \
+ __unorddf2 \
+ __muloti4 __multi3 \
+ __udivmodti4 __udivti3 __umodti3
+
+ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
+ # These intrinsics are defined for ARM64 and RISCV64
+ redirect-intrinsics += \
+ __ashrti3 \
+ __ashlti3 __lshrti3
+endif
+
$(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Dunreachable_pub
-$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
+$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
+$(obj)/core.o: private rustc_target_flags = $(core-cfgs) -Aunused-imports
$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs $(obj)/target.json FORCE
$(call if_changed_dep,rustc_library)
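
For context, `--redefine-sym` makes objcopy rewrite the listed intrinsic symbols inside core.o, so core's references to e.g. `__eqsf2` become `__rust__eqsf2` and no longer collide with the kernel's own C implementations of those symbols. The redirected names are then satisfied by panicking stubs in the style of rust/compiler_builtins.rs (also touched by this series); a minimal sketch of that pattern, with the exact macro body an assumption rather than a quote from this patch:

    // Sketch only: export each stub under the redirected `__rust`-prefixed
    // name, so an unexpected call panics loudly instead of silently pulling
    // in soft-float support behind the kernel's back.
    macro_rules! define_panicking_intrinsics {
        ($reason: tt, { $($ident: ident, )* }) => {
            $(
                #[doc(hidden)]
                #[export_name = concat!("__rust", stringify!($ident))]
                pub extern "C" fn $ident() {
                    panic!($reason);
                }
            )*
        };
    }

    define_panicking_intrinsics!("`f32` should not be used", {
        __eqsf2,
        __gesf2,
        __lesf2,
        __nesf2,
        __unordsf2,
    });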
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index ca224a541770..0142178370e9 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -27,16 +27,23 @@ extern "Rust" {
// (the code expanding that attribute macro generates those functions), or to call
// the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
// otherwise.
- // The rustc fork of LLVM also special-cases these function names to be able to optimize them
+ // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
- #[rustc_allocator_nounwind]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
- #[rustc_allocator_nounwind]
+ #[rustc_deallocator]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
- #[rustc_allocator_nounwind]
+ #[rustc_reallocator]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
- #[rustc_allocator_nounwind]
+ #[rustc_allocator_zeroed]
+ #[cfg_attr(not(bootstrap), rustc_nounwind)]
+ #[cfg_attr(bootstrap, rustc_allocator_nounwind)]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
@@ -72,11 +79,14 @@ pub use std::alloc::Global;
/// # Examples
///
/// ```
-/// use std::alloc::{alloc, dealloc, Layout};
+/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc(layout);
+/// if ptr.is_null() {
+/// handle_alloc_error(layout);
+/// }
///
/// *(ptr as *mut u16) = 42;
/// assert_eq!(*(ptr as *mut u16), 42);
@@ -400,13 +410,13 @@ pub mod __alloc_error_handler {
// if there is no `#[alloc_error_handler]`
#[rustc_std_internal_symbol]
- pub unsafe extern "C-unwind" fn __rdl_oom(size: usize, _align: usize) -> ! {
+ pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
panic!("memory allocation of {size} bytes failed")
}
// if there is an `#[alloc_error_handler]`
#[rustc_std_internal_symbol]
- pub unsafe extern "C-unwind" fn __rg_oom(size: usize, align: usize) -> ! {
+ pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
extern "Rust" {
#[lang = "oom"]
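
As the comments above note, the compiler links `__rdl_oom` when no crate defines an `#[alloc_error_handler]` and `__rg_oom` when one does. For illustration only (a nightly attribute, and not kernel code, since the kernel builds alloc with `no_global_oom_handling`), a crate opts into the `__rg_oom` path with a handler like this at its crate root:

    // Illustrative sketch: a crate-wide allocation-error handler.
    #![feature(alloc_error_handler)]

    #[alloc_error_handler]
    fn oom(layout: core::alloc::Layout) -> ! {
        panic!("memory allocation of {} bytes failed", layout.size());
    }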
diff --git a/rust/alloc/borrow.rs b/rust/alloc/borrow.rs
deleted file mode 100644
index dde4957200d4..000000000000
--- a/rust/alloc/borrow.rs
+++ /dev/null
@@ -1,498 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-//! A module for working with borrowed data.
-
-#![stable(feature = "rust1", since = "1.0.0")]
-
-use core::cmp::Ordering;
-use core::hash::{Hash, Hasher};
-use core::ops::Deref;
-#[cfg(not(no_global_oom_handling))]
-use core::ops::{Add, AddAssign};
-
-#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::borrow::{Borrow, BorrowMut};
-
-use core::fmt;
-#[cfg(not(no_global_oom_handling))]
-use crate::string::String;
-
-use Cow::*;
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
-where
- B: ToOwned,
- <B as ToOwned>::Owned: 'a,
-{
- fn borrow(&self) -> &B {
- &**self
- }
-}
-
-/// A generalization of `Clone` to borrowed data.
-///
-/// Some types make it possible to go from borrowed to owned, usually by
-/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
-/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
-/// from any borrow of a given type.
-#[cfg_attr(not(test), rustc_diagnostic_item = "ToOwned")]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub trait ToOwned {
- /// The resulting type after obtaining ownership.
- #[stable(feature = "rust1", since = "1.0.0")]
- type Owned: Borrow<Self>;
-
- /// Creates owned data from borrowed data, usually by cloning.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// let s: &str = "a";
- /// let ss: String = s.to_owned();
- ///
- /// let v: &[i32] = &[1, 2];
- /// let vv: Vec<i32> = v.to_owned();
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use = "cloning is often expensive and is not expected to have side effects"]
- fn to_owned(&self) -> Self::Owned;
-
- /// Uses borrowed data to replace owned data, usually by cloning.
- ///
- /// This is borrow-generalized version of `Clone::clone_from`.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// # #![feature(toowned_clone_into)]
- /// let mut s: String = String::new();
- /// "hello".clone_into(&mut s);
- ///
- /// let mut v: Vec<i32> = Vec::new();
- /// [1, 2][..].clone_into(&mut v);
- /// ```
- #[unstable(feature = "toowned_clone_into", reason = "recently added", issue = "41263")]
- fn clone_into(&self, target: &mut Self::Owned) {
- *target = self.to_owned();
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ToOwned for T
-where
- T: Clone,
-{
- type Owned = T;
- fn to_owned(&self) -> T {
- self.clone()
- }
-
- fn clone_into(&self, target: &mut T) {
- target.clone_from(self);
- }
-}
-
-/// A clone-on-write smart pointer.
-///
-/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
-/// can enclose and provide immutable access to borrowed data, and clone the
-/// data lazily when mutation or ownership is required. The type is designed to
-/// work with general borrowed data via the `Borrow` trait.
-///
-/// `Cow` implements `Deref`, which means that you can call
-/// non-mutating methods directly on the data it encloses. If mutation
-/// is desired, `to_mut` will obtain a mutable reference to an owned
-/// value, cloning if necessary.
-///
-/// If you need reference-counting pointers, note that
-/// [`Rc::make_mut`][crate::rc::Rc::make_mut] and
-/// [`Arc::make_mut`][crate::sync::Arc::make_mut] can provide clone-on-write
-/// functionality as well.
-///
-/// # Examples
-///
-/// ```
-/// use std::borrow::Cow;
-///
-/// fn abs_all(input: &mut Cow<[i32]>) {
-/// for i in 0..input.len() {
-/// let v = input[i];
-/// if v < 0 {
-/// // Clones into a vector if not already owned.
-/// input.to_mut()[i] = -v;
-/// }
-/// }
-/// }
-///
-/// // No clone occurs because `input` doesn't need to be mutated.
-/// let slice = [0, 1, 2];
-/// let mut input = Cow::from(&slice[..]);
-/// abs_all(&mut input);
-///
-/// // Clone occurs because `input` needs to be mutated.
-/// let slice = [-1, 0, 1];
-/// let mut input = Cow::from(&slice[..]);
-/// abs_all(&mut input);
-///
-/// // No clone occurs because `input` is already owned.
-/// let mut input = Cow::from(vec![-1, 0, 1]);
-/// abs_all(&mut input);
-/// ```
-///
-/// Another example showing how to keep `Cow` in a struct:
-///
-/// ```
-/// use std::borrow::Cow;
-///
-/// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
-/// values: Cow<'a, [X]>,
-/// }
-///
-/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
-/// fn new(v: Cow<'a, [X]>) -> Self {
-/// Items { values: v }
-/// }
-/// }
-///
-/// // Creates a container from borrowed values of a slice
-/// let readonly = [1, 2];
-/// let borrowed = Items::new((&readonly[..]).into());
-/// match borrowed {
-/// Items { values: Cow::Borrowed(b) } => println!("borrowed {b:?}"),
-/// _ => panic!("expect borrowed value"),
-/// }
-///
-/// let mut clone_on_write = borrowed;
-/// // Mutates the data from slice into owned vec and pushes a new value on top
-/// clone_on_write.values.to_mut().push(3);
-/// println!("clone_on_write = {:?}", clone_on_write.values);
-///
-/// // The data was mutated. Let's check it out.
-/// match clone_on_write {
-/// Items { values: Cow::Owned(_) } => println!("clone_on_write contains owned data"),
-/// _ => panic!("expect owned data"),
-/// }
-/// ```
-#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(test), rustc_diagnostic_item = "Cow")]
-pub enum Cow<'a, B: ?Sized + 'a>
-where
- B: ToOwned,
-{
- /// Borrowed data.
- #[stable(feature = "rust1", since = "1.0.0")]
- Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B),
-
- /// Owned data.
- #[stable(feature = "rust1", since = "1.0.0")]
- Owned(#[stable(feature = "rust1", since = "1.0.0")] <B as ToOwned>::Owned),
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<B: ?Sized + ToOwned> Clone for Cow<'_, B> {
- fn clone(&self) -> Self {
- match *self {
- Borrowed(b) => Borrowed(b),
- Owned(ref o) => {
- let b: &B = o.borrow();
- Owned(b.to_owned())
- }
- }
- }
-
- fn clone_from(&mut self, source: &Self) {
- match (self, source) {
- (&mut Owned(ref mut dest), &Owned(ref o)) => o.borrow().clone_into(dest),
- (t, s) => *t = s.clone(),
- }
- }
-}
-
-impl<B: ?Sized + ToOwned> Cow<'_, B> {
- /// Returns true if the data is borrowed, i.e. if `to_mut` would require additional work.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(cow_is_borrowed)]
- /// use std::borrow::Cow;
- ///
- /// let cow = Cow::Borrowed("moo");
- /// assert!(cow.is_borrowed());
- ///
- /// let bull: Cow<'_, str> = Cow::Owned("...moo?".to_string());
- /// assert!(!bull.is_borrowed());
- /// ```
- #[unstable(feature = "cow_is_borrowed", issue = "65143")]
- #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
- pub const fn is_borrowed(&self) -> bool {
- match *self {
- Borrowed(_) => true,
- Owned(_) => false,
- }
- }
-
- /// Returns true if the data is owned, i.e. if `to_mut` would be a no-op.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(cow_is_borrowed)]
- /// use std::borrow::Cow;
- ///
- /// let cow: Cow<'_, str> = Cow::Owned("moo".to_string());
- /// assert!(cow.is_owned());
- ///
- /// let bull = Cow::Borrowed("...moo?");
- /// assert!(!bull.is_owned());
- /// ```
- #[unstable(feature = "cow_is_borrowed", issue = "65143")]
- #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
- pub const fn is_owned(&self) -> bool {
- !self.is_borrowed()
- }
-
- /// Acquires a mutable reference to the owned form of the data.
- ///
- /// Clones the data if it is not already owned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::borrow::Cow;
- ///
- /// let mut cow = Cow::Borrowed("foo");
- /// cow.to_mut().make_ascii_uppercase();
- ///
- /// assert_eq!(
- /// cow,
- /// Cow::Owned(String::from("FOO")) as Cow<str>
- /// );
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
- match *self {
- Borrowed(borrowed) => {
- *self = Owned(borrowed.to_owned());
- match *self {
- Borrowed(..) => unreachable!(),
- Owned(ref mut owned) => owned,
- }
- }
- Owned(ref mut owned) => owned,
- }
- }
-
- /// Extracts the owned data.
- ///
- /// Clones the data if it is not already owned.
- ///
- /// # Examples
- ///
- /// Calling `into_owned` on a `Cow::Borrowed` returns a clone of the borrowed data:
- ///
- /// ```
- /// use std::borrow::Cow;
- ///
- /// let s = "Hello world!";
- /// let cow = Cow::Borrowed(s);
- ///
- /// assert_eq!(
- /// cow.into_owned(),
- /// String::from(s)
- /// );
- /// ```
- ///
- /// Calling `into_owned` on a `Cow::Owned` returns the owned data. The data is moved out of the
- /// `Cow` without being cloned.
- ///
- /// ```
- /// use std::borrow::Cow;
- ///
- /// let s = "Hello world!";
- /// let cow: Cow<str> = Cow::Owned(String::from(s));
- ///
- /// assert_eq!(
- /// cow.into_owned(),
- /// String::from(s)
- /// );
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn into_owned(self) -> <B as ToOwned>::Owned {
- match self {
- Borrowed(borrowed) => borrowed.to_owned(),
- Owned(owned) => owned,
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
-impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
-where
- B::Owned: ~const Borrow<B>,
-{
- type Target = B;
-
- fn deref(&self) -> &B {
- match *self {
- Borrowed(borrowed) => borrowed,
- Owned(ref owned) => owned.borrow(),
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<B: ?Sized> Eq for Cow<'_, B> where B: Eq + ToOwned {}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<B: ?Sized> Ord for Cow<'_, B>
-where
- B: Ord + ToOwned,
-{
- #[inline]
- fn cmp(&self, other: &Self) -> Ordering {
- Ord::cmp(&**self, &**other)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B>
-where
- B: PartialEq<C> + ToOwned,
- C: ToOwned,
-{
- #[inline]
- fn eq(&self, other: &Cow<'b, C>) -> bool {
- PartialEq::eq(&**self, &**other)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, B: ?Sized> PartialOrd for Cow<'a, B>
-where
- B: PartialOrd + ToOwned,
-{
- #[inline]
- fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
- PartialOrd::partial_cmp(&**self, &**other)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<B: ?Sized> fmt::Debug for Cow<'_, B>
-where
- B: fmt::Debug + ToOwned<Owned: fmt::Debug>,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match *self {
- Borrowed(ref b) => fmt::Debug::fmt(b, f),
- Owned(ref o) => fmt::Debug::fmt(o, f),
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<B: ?Sized> fmt::Display for Cow<'_, B>
-where
- B: fmt::Display + ToOwned<Owned: fmt::Display>,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match *self {
- Borrowed(ref b) => fmt::Display::fmt(b, f),
- Owned(ref o) => fmt::Display::fmt(o, f),
- }
- }
-}
-
-#[stable(feature = "default", since = "1.11.0")]
-impl<B: ?Sized> Default for Cow<'_, B>
-where
- B: ToOwned<Owned: Default>,
-{
- /// Creates an owned Cow<'a, B> with the default value for the contained owned value.
- fn default() -> Self {
- Owned(<B as ToOwned>::Owned::default())
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<B: ?Sized> Hash for Cow<'_, B>
-where
- B: Hash + ToOwned,
-{
- #[inline]
- fn hash<H: Hasher>(&self, state: &mut H) {
- Hash::hash(&**self, state)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + ToOwned> AsRef<T> for Cow<'_, T> {
- fn as_ref(&self) -> &T {
- self
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "cow_add", since = "1.14.0")]
-impl<'a> Add<&'a str> for Cow<'a, str> {
- type Output = Cow<'a, str>;
-
- #[inline]
- fn add(mut self, rhs: &'a str) -> Self::Output {
- self += rhs;
- self
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "cow_add", since = "1.14.0")]
-impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
- type Output = Cow<'a, str>;
-
- #[inline]
- fn add(mut self, rhs: Cow<'a, str>) -> Self::Output {
- self += rhs;
- self
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "cow_add", since = "1.14.0")]
-impl<'a> AddAssign<&'a str> for Cow<'a, str> {
- fn add_assign(&mut self, rhs: &'a str) {
- if self.is_empty() {
- *self = Cow::Borrowed(rhs)
- } else if !rhs.is_empty() {
- if let Cow::Borrowed(lhs) = *self {
- let mut s = String::with_capacity(lhs.len() + rhs.len());
- s.push_str(lhs);
- *self = Cow::Owned(s);
- }
- self.to_mut().push_str(rhs);
- }
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "cow_add", since = "1.14.0")]
-impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
- fn add_assign(&mut self, rhs: Cow<'a, str>) {
- if self.is_empty() {
- *self = rhs
- } else if !rhs.is_empty() {
- if let Cow::Borrowed(lhs) = *self {
- let mut s = String::with_capacity(lhs.len() + rhs.len());
- s.push_str(lhs);
- *self = Cow::Owned(s);
- }
- self.to_mut().push_str(&rhs);
- }
- }
-}
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index 5e7518bffbfb..d4a03edd7d89 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-//! A pointer type for heap allocation.
+//! The `Box<T>` type for heap allocation.
//!
//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
@@ -124,7 +124,21 @@
//! definition is just using `T*` can lead to undefined behavior, as
//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
//!
+//! # Considerations for unsafe code
+//!
+//! **Warning: This section is not normative and is subject to change, possibly
+//! being relaxed in the future! It is a simplified summary of the rules
+//! currently implemented in the compiler.**
+//!
+//! The aliasing rules for `Box<T>` are the same as for `&mut T`. `Box<T>`
+//! asserts uniqueness over its content. Using raw pointers derived from a box
+//! after that box has been mutated through, moved or borrowed as `&mut T`
+//! is not allowed. For more guidance on working with box from unsafe code, see
+//! [rust-lang/unsafe-code-guidelines#326][ucg#326].
+//!
+//!
//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
+//! [ucg#326]: https://github.com/rust-lang/unsafe-code-guidelines/issues/326
//! [dereferencing]: core::ops::Deref
//! [`Box::<T>::from_raw(value)`]: Box::from_raw
//! [`Global`]: crate::alloc::Global
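
A small editorial illustration of the aliasing rule stated above (not part of the patch): a raw pointer derived from a box is invalidated once the box is moved or mutably borrowed.

    // Editorial sketch: derive a raw pointer from a Box, then move the Box.
    fn main() {
        let b = Box::new(5u32);
        let p: *const u32 = &*b;
        let b2 = b; // the move re-asserts the box's uniqueness over its content
        // Dereferencing `p` past this point would be undefined behavior under
        // the rules described above; read through the current owner instead.
        assert_eq!(*b2, 5);
        let _ = p;
    }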
@@ -139,6 +153,7 @@ use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
+use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
@@ -163,6 +178,8 @@ use crate::raw_vec::RawVec;
#[cfg(not(no_global_oom_handling))]
use crate::str::from_boxed_utf8_unchecked;
#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;
#[cfg(not(no_thin))]
@@ -196,12 +213,13 @@ impl<T> Box<T> {
/// ```
/// let five = Box::new(5);
/// ```
- #[cfg(not(no_global_oom_handling))]
+ #[cfg(all(not(no_global_oom_handling)))]
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
pub fn new(x: T) -> Self {
- box x
+ #[rustc_box]
+ Box::new(x)
}
/// Constructs a new box with uninitialized contents.
@@ -256,14 +274,21 @@ impl<T> Box<T> {
Self::new_zeroed_in(Global)
}
- /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
+ /// Constructs a new `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
/// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin(x)`
+ /// does the same as <code>[Box::into_pin]\([Box::new]\(x))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new`].
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "pin", since = "1.33.0")]
#[must_use]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
- (box x).into()
+ (#[rustc_box]
+ Box::new(x))
+ .into()
}
/// Allocates memory on the heap then places `x` into it,
@@ -543,8 +568,13 @@ impl<T, A: Allocator> Box<T, A> {
unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
}
- /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement `Unpin`, then
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
/// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin_in(x, alloc)`
+ /// does the same as <code>[Box::into_pin]\([Box::new_in]\(x, alloc))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T, A>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
@@ -926,6 +956,7 @@ impl<T: ?Sized> Box<T> {
/// [`Layout`]: crate::Layout
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
+ #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"]
pub unsafe fn from_raw(raw: *mut T) -> Self {
unsafe { Self::from_raw_in(raw, Global) }
}
@@ -1160,19 +1191,44 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
}
- /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
///
/// This conversion does not allocate on the heap and happens in place.
///
/// This is also available via [`From`].
- #[unstable(feature = "box_into_pin", issue = "62370")]
+ ///
+ /// Constructing and pinning a `Box` with <code>Box::into_pin([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `into_pin` method is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
+ ///
+ /// # Notes
+ ///
+ /// It's not recommended that crates add an impl like `From<Box<T>> for Pin<T>`,
+ /// as it'll introduce an ambiguity when calling `Pin::from`.
+ /// A demonstration of such a poor impl is shown below.
+ ///
+ /// ```compile_fail
+ /// # use std::pin::Pin;
+ /// struct Foo; // A type defined in this crate.
+ /// impl From<Box<()>> for Pin<Foo> {
+ /// fn from(_: Box<()>) -> Pin<Foo> {
+ /// Pin::new(Foo)
+ /// }
+ /// }
+ ///
+ /// let foo = Box::new(());
+ /// let bar = Pin::from(foo);
+ /// ```
+ #[stable(feature = "box_into_pin", since = "1.63.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
pub const fn into_pin(boxed: Self) -> Pin<Self>
where
A: 'static,
{
// It's not possible to move or replace the insides of a `Pin<Box<T>>`
- // when `T: !Unpin`, so it's safe to pin it directly without any
+ // when `T: !Unpin`, so it's safe to pin it directly without any
// additional requirements.
unsafe { Pin::new_unchecked(boxed) }
}
@@ -1190,7 +1246,8 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
impl<T: Default> Default for Box<T> {
/// Creates a `Box<T>`, with the `Default` value for T.
fn default() -> Self {
- box T::default()
+ #[rustc_box]
+ Box::new(T::default())
}
}
@@ -1408,9 +1465,17 @@ impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
where
A: 'static,
{
- /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
///
/// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`Box::into_pin`].
+ ///
+ /// Constructing and pinning a `Box` with <code><Pin<Box\<T>>>::from([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `From` implementation is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
fn from(boxed: Box<T, A>) -> Self {
Box::into_pin(boxed)
}
@@ -1422,7 +1487,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
- /// and performs a copy of `slice`.
+ /// and performs a copy of `slice` and its contents.
///
/// # Examples
/// ```rust
@@ -1554,10 +1619,27 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> {
/// println!("{boxed:?}");
/// ```
fn from(array: [T; N]) -> Box<[T]> {
- box array
+ #[rustc_box]
+ Box::new(array)
}
}
+/// Casts a boxed slice to a boxed array.
+///
+/// # Safety
+///
+/// `boxed_slice.len()` must be exactly `N`.
+unsafe fn boxed_slice_as_array_unchecked<T, A: Allocator, const N: usize>(
+ boxed_slice: Box<[T], A>,
+) -> Box<[T; N], A> {
+ debug_assert_eq!(boxed_slice.len(), N);
+
+ let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice);
+ // SAFETY: Pointer and allocator came from an existing box,
+ // and our safety condition requires that the length is exactly `N`
+ unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) }
+}
+
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
type Error = Box<[T]>;
@@ -1573,13 +1655,46 @@ impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
/// `boxed_slice.len()` does not equal `N`.
fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
if boxed_slice.len() == N {
- Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+ Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
} else {
Err(boxed_slice)
}
}
}
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "boxed_array_try_from_vec", since = "1.66.0")]
+impl<T, const N: usize> TryFrom<Vec<T>> for Box<[T; N]> {
+ type Error = Vec<T>;
+
+ /// Attempts to convert a `Vec<T>` into a `Box<[T; N]>`.
+ ///
+ /// Like [`Vec::into_boxed_slice`], this is in-place if `vec.capacity() == N`,
+ /// but will require a reallocation otherwise.
+ ///
+ /// # Errors
+ ///
+ /// Returns the original `Vec<T>` in the `Err` variant if
+ /// `boxed_slice.len()` does not equal `N`.
+ ///
+ /// # Examples
+ ///
+ /// This can be used with [`vec!`] to create an array on the heap:
+ ///
+ /// ```
+ /// let state: Box<[f32; 100]> = vec![1.0; 100].try_into().unwrap();
+ /// assert_eq!(state.len(), 100);
+ /// ```
+ fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
+ if vec.len() == N {
+ let boxed_slice = vec.into_boxed_slice();
+ Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
+ } else {
+ Err(vec)
+ }
+ }
+}
+
impl<A: Allocator> Box<dyn Any, A> {
/// Attempt to downcast the box to a concrete type.
///
@@ -1973,8 +2088,7 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
* could have a method to project a Pin<T> from it.
*/
#[stable(feature = "pin", since = "1.33.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {}
+impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
@@ -2026,3 +2140,292 @@ impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
(**self).size_hint()
}
}
+
+impl dyn Error {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
+ if self.is::<T>() {
+ unsafe {
+ let raw: *mut dyn Error = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl dyn Error + Send {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ })
+ }
+}
+
+impl dyn Error + Send + Sync {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send + Sync` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ })
+ }
+}
+
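
Typical use of the `downcast` methods added above looks like this (editorial example, shown against std for brevity):

    use std::error::Error;
    use std::fmt;

    #[derive(Debug)]
    struct AnError;

    impl fmt::Display for AnError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "an error")
        }
    }

    impl Error for AnError {}

    fn main() {
        let boxed: Box<dyn Error> = Box::new(AnError);
        // Succeeds because the erased type really is `AnError`.
        let concrete: Box<AnError> = boxed.downcast::<AnError>().unwrap();
        println!("recovered: {concrete:?}");
    }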
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
+ /// Converts a type of [`Error`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error>::from(an_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
+ /// dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// unsafe impl Send for AnError {}
+ ///
+ /// unsafe impl Sync for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for Box<dyn Error + Send + Sync> {
+ /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: String) -> Box<dyn Error + Send + Sync> {
+ struct StringError(String);
+
+ impl Error for StringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ &self.0
+ }
+ }
+
+ impl fmt::Display for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+ }
+
+ // Purposefully skip printing "StringError(..)"
+ impl fmt::Debug for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+ }
+
+ Box::new(StringError(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<String> for Box<dyn Error> {
+ /// Converts a [`String`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(str_err: String) -> Box<dyn Error> {
+ let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
+ let err2: Box<dyn Error> = err1;
+ err2
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<&str> for Box<dyn Error> {
+ /// Converts a [`str`] into a box of dyn [`Error`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: &str) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'a, str>) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[stable(feature = "box_error", since = "1.8.0")]
+impl<T: core::error::Error> core::error::Error for Box<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ core::error::Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn core::error::Error> {
+ core::error::Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ core::error::Error::source(&**self)
+ }
+}
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
index 1eec265b28f8..da6154412416 100644
--- a/rust/alloc/collections/mod.rs
+++ b/rust/alloc/collections/mod.rs
@@ -154,3 +154,6 @@ trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl core::error::Error for TryReserveError {}
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index b2a13a53f19a..cc3850c3b519 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -58,10 +58,6 @@
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
@@ -75,10 +71,16 @@
any(not(feature = "miri-test-libstd"), test, doctest),
no_global_oom_handling,
not(no_global_oom_handling),
+ not(no_rc),
+ not(no_sync),
target_has_atomic = "ptr"
))]
#![no_std]
#![needs_allocator]
+// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
@@ -88,10 +90,10 @@
#![allow(explicit_outlives_requirements)]
//
// Library features:
-#![cfg_attr(not(no_global_oom_handling), feature(alloc_c_string))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
+#![feature(array_into_iter_constructors)]
#![feature(array_methods)]
#![feature(array_windows)]
#![feature(assert_matches)]
@@ -99,8 +101,8 @@
#![feature(coerce_unsized)]
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
-#![feature(const_cow_is_borrowed)]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(not(no_borrow), feature(const_cow_is_borrowed))]
#![feature(const_convert)]
#![feature(const_size_of_val)]
#![feature(const_align_of_val)]
@@ -108,13 +110,14 @@
#![feature(const_maybe_uninit_write)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_refs_to_cell)]
-#![feature(core_c_str)]
#![feature(core_intrinsics)]
-#![feature(core_ffi_c)]
#![feature(const_eval_select)]
#![feature(const_pin)]
+#![feature(const_waker)]
#![feature(cstr_from_bytes_until_nul)]
#![feature(dispatch_from_dyn)]
+#![feature(error_generic_member_access)]
+#![feature(error_in_core)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
@@ -122,16 +125,24 @@
#![feature(hasher_prefixfree_extras)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
+#![feature(iter_next_chunk)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(maybe_uninit_uninit_array_transpose)]
#![cfg_attr(test, feature(new_uninit))]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
+#![feature(pointer_byte_offsets)]
+#![feature(provide_any)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
+#![feature(saturating_int_impl)]
#![feature(set_ptr_value)]
+#![feature(sized_type_properties)]
+#![feature(slice_from_ptr_range)]
#![feature(slice_group_by)]
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
@@ -145,12 +156,13 @@
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
+#![feature(utf8_chunks)]
+#![feature(std_internals)]
//
// Language features:
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
-#![feature(box_syntax)]
#![feature(cfg_sanitize)]
#![feature(const_deref)]
#![feature(const_mut_refs)]
@@ -164,19 +176,20 @@
#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
-#![feature(let_else)]
#![feature(min_specialization)]
#![feature(negative_impls)]
#![feature(never_type)]
-#![feature(nll)] // Not necessary, but here to test the `nll` feature.
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
+#![feature(pointer_is_aligned)]
#![feature(slice_internals)]
#![feature(staged_api)]
+#![feature(stmt_expr_attributes)]
#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
+#![feature(with_negative_coherence)]
//
// Rustdoc features:
#![feature(doc_cfg)]
@@ -216,9 +229,10 @@ pub mod boxed;
mod boxed {
pub use std::boxed::Box;
}
+#[cfg(not(no_borrow))]
pub mod borrow;
pub mod collections;
-#[cfg(not(no_global_oom_handling))]
+#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
pub mod ffi;
#[cfg(not(no_fmt))]
pub mod fmt;
@@ -230,9 +244,9 @@ pub mod str;
#[cfg(not(no_string))]
pub mod string;
#[cfg(not(no_sync))]
-#[cfg(target_has_atomic = "ptr")]
+#[cfg(all(not(no_rc), target_has_atomic = "ptr"))]
pub mod sync;
-#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
+#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod task;
#[cfg(test)]
mod tests;
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index eb77db5def55..f02faff67a80 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -5,7 +5,7 @@
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
@@ -177,7 +177,7 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if mem::size_of::<T>() == 0 || capacity == 0 {
+ if T::IS_ZST || capacity == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
@@ -212,7 +212,7 @@ impl<T, A: Allocator> RawVec<T, A> {
fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if mem::size_of::<T>() == 0 || capacity == 0 {
+ if T::IS_ZST || capacity == 0 {
return Ok(Self::new_in(alloc));
}
@@ -262,7 +262,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -271,7 +271,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if mem::size_of::<T>() == 0 || self.cap == 0 {
+ if T::IS_ZST || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
@@ -419,7 +419,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
@@ -445,7 +445,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
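
The `T::IS_ZST` used throughout this hunk comes from the `SizedTypeProperties` trait newly imported from `core::mem`; conceptually it just gives the old `size_of::<T>() == 0` check a name. A simplified sketch of the idea (assumption: this mirrors the unstable upstream trait):

    // Sketch: a blanket trait exposing "is this type zero-sized?" as an
    // associated constant, replacing repeated `mem::size_of` comparisons.
    trait SizedTypeProperties: Sized {
        const IS_ZST: bool = core::mem::size_of::<Self>() == 0;
    }
    impl<T> SizedTypeProperties for T {}

    fn main() {
        assert!(<() as SizedTypeProperties>::IS_ZST);
        assert!(!<u32 as SizedTypeProperties>::IS_ZST);
    }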
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
index e444e97fa145..467935cf9a06 100644
--- a/rust/alloc/slice.rs
+++ b/rust/alloc/slice.rs
@@ -1,84 +1,14 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
-//! Slices are a view into a block of memory represented as a pointer and a
-//! length.
+//! Most of the structs in this module are iterator types which can only be created
+//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
-//! ```
-//! // slicing a Vec
-//! let vec = vec![1, 2, 3];
-//! let int_slice = &vec[..];
-//! // coercing an array to a slice
-//! let str_slice: &[&str] = &["one", "two", "three"];
-//! ```
-//!
-//! Slices are either mutable or shared. The shared slice type is `&[T]`,
-//! while the mutable slice type is `&mut [T]`, where `T` represents the element
-//! type. For example, you can mutate the block of memory that a mutable slice
-//! points to:
-//!
-//! ```
-//! let x = &mut [1, 2, 3];
-//! x[1] = 7;
-//! assert_eq!(x, &[1, 7, 3]);
-//! ```
-//!
-//! Here are some of the things this module contains:
-//!
-//! ## Structs
-//!
-//! There are several structs that are useful for slices, such as [`Iter`], which
-//! represents iteration over a slice.
-//!
-//! ## Trait Implementations
-//!
-//! There are several implementations of common traits for slices. Some examples
-//! include:
-//!
-//! * [`Clone`]
-//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
-//! * [`Hash`] - for slices whose element type is [`Hash`].
-//!
-//! ## Iteration
-//!
-//! The slices implement `IntoIterator`. The iterator yields references to the
-//! slice elements.
-//!
-//! ```
-//! let numbers = &[0, 1, 2];
-//! for n in numbers {
-//! println!("{n} is a number!");
-//! }
-//! ```
-//!
-//! The mutable slice yields mutable references to the elements:
-//!
-//! ```
-//! let mut scores = [7, 8, 9];
-//! for score in &mut scores[..] {
-//! *score += 1;
-//! }
-//! ```
-//!
-//! This iterator yields mutable references to the slice's elements, so while
-//! the element type of the slice is `i32`, the element type of the iterator is
-//! `&mut i32`.
-//!
-//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
-//! iterators.
-//! * Further methods that return iterators are [`.split`], [`.splitn`],
-//! [`.chunks`], [`.windows`] and more.
-//!
-//! [`Hash`]: core::hash::Hash
-//! [`.iter`]: slice::iter
-//! [`.iter_mut`]: slice::iter_mut
-//! [`.split`]: slice::split
-//! [`.splitn`]: slice::splitn
-//! [`.chunks`]: slice::chunks
-//! [`.windows`]: slice::windows
+//! A few functions are provided to create a slice from a value reference
+//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
@@ -88,9 +18,7 @@ use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
-use core::mem;
-#[cfg(not(no_global_oom_handling))]
-use core::mem::size_of;
+use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
@@ -116,6 +44,8 @@ pub use core::slice::EscapeAscii;
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
+#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
+pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
@@ -275,7 +205,7 @@ impl<T> [T] {
where
T: Ord,
{
- merge_sort(self, |a, b| a.lt(b));
+ merge_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
@@ -836,14 +766,14 @@ impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Borrow<[T]> for Vec<T> {
+impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
fn borrow(&self) -> &[T] {
&self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> BorrowMut<[T]> for Vec<T> {
+impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
fn borrow_mut(&mut self) -> &mut [T] {
&mut self[..]
}
@@ -1024,7 +954,7 @@ where
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
unsafe {
- let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
@@ -1038,12 +968,12 @@ where
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
- *ptr = unsafe { ptr.offset(1) };
+ *ptr = unsafe { ptr.add(1) };
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.offset(-1) };
+ *ptr = unsafe { ptr.sub(1) };
*ptr
}
@@ -1088,7 +1018,7 @@ where
const MIN_RUN: usize = 10;
// Sorting has no meaningful behavior on zero-sized types.
- if size_of::<T>() == 0 {
+ if T::IS_ZST {
return;
}
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
index b6a5f98e4fcd..3594ad890c3d 100644
--- a/rust/alloc/vec/drain.rs
+++ b/rust/alloc/vec/drain.rs
@@ -3,7 +3,7 @@
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
-use core::mem;
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self, NonNull};
use core::slice::{self};
@@ -67,6 +67,77 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+ /// // If we hadn't called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
+ // ^-- start \_________/-- unyielded_len \____/-- self.tail_len
+ // ^-- unyielded_ptr ^-- tail
+ //
+ // Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
+ // Here we want to
+ // 1. Move [unyielded] to `start`
+ // 2. Move [tail] to a new start at `start + len(unyielded)`
+ // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
+ // a. In case of ZST, this is the only thing we want to do
+ // 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ let source_vec = this.vec.as_mut();
+
+ let start = source_vec.len();
+ let tail = this.tail_start;
+
+ let unyielded_len = this.iter.len();
+ let unyielded_ptr = this.iter.as_slice().as_ptr();
+
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move {
+ let start_ptr = source_vec.as_mut_ptr().add(start);
+
+ // memmove back unyielded elements
+ if unyielded_ptr != start_ptr {
+ let src = unyielded_ptr;
+ let dst = start_ptr;
+
+ ptr::copy(src, dst, unyielded_len);
+ }
+
+ // memmove back untouched tail
+ if tail != (start + unyielded_len) {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = start_ptr.add(unyielded_len);
+ ptr::copy(src, dst, this.tail_len);
+ }
+ }
+
+ source_vec.set_len(start + unyielded_len + this.tail_len);
+ }
+ }
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
@@ -133,7 +204,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
let mut vec = self.vec;
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
// this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
unsafe {
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
index b04fce041622..4b019220657d 100644
--- a/rust/alloc/vec/drain_filter.rs
+++ b/rust/alloc/vec/drain_filter.rs
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
-use core::ptr::{self};
-use core::slice::{self};
+use core::mem::{self, ManuallyDrop};
+use core::ptr;
+use core::slice;
use super::Vec;
@@ -56,6 +57,61 @@ where
pub fn allocator(&self) -> &A {
self.vec.allocator()
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain_filter(|_| true);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+ /// // If we hadn't called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // _____________________/-- old_len
+ // / \
+ // [kept] [yielded] [tail]
+ // \_______/ ^-- idx
+ // \-- del
+ //
+ // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
+ //
+ // 1. Move [tail] after [kept]
+ // 2. Update length of the original vec to `old_len - del`
+ // a. In case of ZST, this is the only thing we want to do
+ // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move && this.idx < this.old_len && this.del > 0 {
+ let ptr = this.vec.as_mut_ptr();
+ let src = ptr.add(this.idx);
+ let dst = src.sub(this.del);
+ let tail_len = this.old_len - this.idx;
+ src.copy_to(dst, tail_len);
+ }
+
+ let new_len = this.old_len - this.del;
+ this.vec.set_len(new_len);
+ }
+ }
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index f7a50e76691e..a8a2a8b66bfd 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -4,13 +4,13 @@
use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
use crate::raw_vec::RawVec;
+use core::array;
use core::fmt;
-use core::intrinsics::arith_offset;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -97,13 +97,16 @@ impl<T, A: Allocator> IntoIter<T, A> {
}
/// Drops remaining elements and relinquishes the backing allocation.
+ /// This method guarantees it won't panic before relinquishing
+ /// the backing allocation.
///
/// This is roughly equivalent to the following, but more efficient
///
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
+ /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
/// (&mut into_iter).for_each(core::mem::drop);
- /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
+ /// std::mem::forget(into_iter);
/// ```
///
/// This method is used by in-place iteration, refer to the vec::in_place_collect
@@ -120,6 +123,8 @@ impl<T, A: Allocator> IntoIter<T, A> {
self.ptr = self.buf.as_ptr();
self.end = self.buf.as_ptr();
+ // Dropping the remaining elements can panic, so this needs to be
+ // done only after updating the other fields.
unsafe {
ptr::drop_in_place(remaining);
}
@@ -150,19 +155,19 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- if self.ptr as *const _ == self.end {
+ if self.ptr == self.end {
None
- } else if mem::size_of::<T>() == 0 {
+ } else if T::IS_ZST {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+ self.ptr = self.ptr.wrapping_byte_add(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
- self.ptr = unsafe { self.ptr.offset(1) };
+ self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
@@ -170,7 +175,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = if mem::size_of::<T>() == 0 {
+ let exact = if T::IS_ZST {
self.end.addr().wrapping_sub(self.ptr.addr())
} else {
unsafe { self.end.sub_ptr(self.ptr) }
@@ -182,11 +187,11 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
// effectively results in unsigned pointers representing positions 0..usize::MAX,
// which is valid for ZSTs.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+ self.ptr = self.ptr.wrapping_byte_add(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@@ -206,6 +211,43 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
self.len()
}
+ #[inline]
+ fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> {
+ let mut raw_ary = MaybeUninit::uninit_array();
+
+ let len = self.len();
+
+ if T::IS_ZST {
+ if len < N {
+ self.forget_remaining_elements();
+ // Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
+ return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
+ }
+
+ self.ptr = self.ptr.wrapping_byte_add(N);
+ // Safety: ditto
+ return Ok(unsafe { raw_ary.transpose().assume_init() });
+ }
+
+ if len < N {
+ // Safety: `len` indicates that this many elements are available and we just checked that
+ // it fits into the array.
+ unsafe {
+ ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, len);
+ self.forget_remaining_elements();
+ return Err(array::IntoIter::new_unchecked(raw_ary, 0..len));
+ }
+ }
+
+ // Safety: `len` is larger than the array size. Copy a fixed amount here to fully initialize
+ // the array.
+ return unsafe {
+ ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N);
+ self.ptr = self.ptr.add(N);
+ Ok(raw_ary.transpose().assume_init())
+ };
+ }
+
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
where
Self: TrustedRandomAccessNoCoerce,
@@ -219,7 +261,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
unsafe {
- if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
}
}
}
@@ -230,14 +272,14 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
- } else if mem::size_of::<T>() == 0 {
+ } else if T::IS_ZST {
// See above for why 'ptr.offset' isn't used
- self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
- self.end = unsafe { self.end.offset(-1) };
+ self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
@@ -246,14 +288,12 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
let step_size = self.len().min(n);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// SAFETY: same as for advance_by()
- self.end = unsafe {
- arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
- }
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: same as for advance_by()
- self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
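
Caller-side behaviour of the new `next_chunk` specialization (a sketch; `Iterator::next_chunk` is gated behind the nightly `iter_next_chunk` feature):

    #![feature(iter_next_chunk)]

    fn main() {
        let mut it = vec![1, 2, 3, 4, 5].into_iter();
        assert_eq!(it.next_chunk::<2>().unwrap(), [1, 2]);
        assert_eq!(it.next_chunk::<2>().unwrap(), [3, 4]);
        // Fewer than N elements left: the remainder comes back in the error.
        let rest = it.next_chunk::<2>().unwrap_err();
        assert_eq!(rest.as_slice(), &[5]);
    }
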
diff --git a/rust/alloc/vec/is_zero.rs b/rust/alloc/vec/is_zero.rs
index 377f3d172777..426bb2c9f6ff 100644
--- a/rust/alloc/vec/is_zero.rs
+++ b/rust/alloc/vec/is_zero.rs
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+use core::num::{Saturating, Wrapping};
use crate::boxed::Box;
@@ -19,12 +20,14 @@ macro_rules! impl_is_zero {
};
}
+impl_is_zero!(i8, |x| x == 0); // It is needed to impl for arrays and tuples of i8.
impl_is_zero!(i16, |x| x == 0);
impl_is_zero!(i32, |x| x == 0);
impl_is_zero!(i64, |x| x == 0);
impl_is_zero!(i128, |x| x == 0);
impl_is_zero!(isize, |x| x == 0);
+impl_is_zero!(u8, |x| x == 0); // It is needed to impl for arrays and tuples of u8.
impl_is_zero!(u16, |x| x == 0);
impl_is_zero!(u32, |x| x == 0);
impl_is_zero!(u64, |x| x == 0);
@@ -56,15 +59,41 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
// it's worth doing if the array is really long. The threshold here
- // is largely arbitrary, but was picked because as of 2022-05-01 LLVM
- // can const-fold the check in `vec![[0; 32]; n]` but not in
- // `vec![[0; 64]; n]`: https://godbolt.org/z/WTzjzfs5b
+ // is largely arbitrary, but was picked because as of 2022-07-01 LLVM
+ // fails to const-fold the check in `vec![[1; 32]; n]`
+ // See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
// Feel free to tweak if you have better evidence.
- N <= 32 && self.iter().all(IsZero::is_zero)
+ N <= 16 && self.iter().all(IsZero::is_zero)
}
}
+// This is a recursive macro.
+macro_rules! impl_for_tuples {
+ // Stopper
+ () => {
+ // There is no point implementing this for the empty tuple, as it is a ZST.
+ };
+ ($first_arg:ident $(,$rest:ident)*) => {
+ unsafe impl<$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*) {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ // Destructure the tuple into N references.
+ // Rust allows shadowing generic params with local variable names.
+ #[allow(non_snake_case)]
+ let ($first_arg, $($rest,)*) = self;
+
+ $first_arg.is_zero()
+ $( && $rest.is_zero() )*
+ }
+ }
+
+ impl_for_tuples!($($rest),*);
+ }
+}
+
+impl_for_tuples!(A, B, C, D, E, F, G, H);
+
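For a two-element tuple the recursion bottoms out in an impl equivalent to this (expansion sketch; the real expansion shadows the generic parameter names instead of introducing `a` and `b`):

    unsafe impl<A: IsZero, B: IsZero> IsZero for (A, B) {
        #[inline]
        fn is_zero(&self) -> bool {
            let (a, b) = self;
            a.is_zero() && b.is_zero()
        }
    }
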
// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
// For fat pointers, the bytes that would be the pointer metadata in the `Some`
// variant are padding in the `None` variant, so ignoring them and
@@ -118,3 +147,39 @@ impl_is_zero_option_of_nonzero!(
NonZeroUsize,
NonZeroIsize,
);
+
+unsafe impl<T: IsZero> IsZero for Wrapping<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+unsafe impl<T: IsZero> IsZero for Saturating<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+macro_rules! impl_for_optional_bool {
+ ($($t:ty,)+) => {$(
+ unsafe impl IsZero for $t {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ // SAFETY: This is *not* a stable layout guarantee, but
+ // inside `core` we're allowed to rely on the current rustc
+ // behaviour that options of bools will be one byte with
+ // no padding, so long as they're nested less than 254 deep.
+ let raw: u8 = unsafe { core::mem::transmute(*self) };
+ raw == 0
+ }
+ }
+ )+};
+}
+impl_for_optional_bool! {
+ Option<bool>,
+ Option<Option<bool>>,
+ Option<Option<Option<bool>>>,
+ // Could go further, but not worth the metadata overhead
+}
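
The transmute leans on the current rustc niche layout; the assumption can be spelled out as follows (holds on today's compilers, with the same caveat as the SAFETY comment above):

    use core::mem::{size_of, transmute};

    fn main() {
        assert_eq!(size_of::<Option<bool>>(), 1);
        assert_eq!(size_of::<Option<Option<bool>>>(), 1);
        // Under the current layout the all-zero byte is `Some(false)`,
        // exactly the value `is_zero()` must classify as zero.
        let raw: u8 = unsafe { transmute(Some(false)) };
        assert_eq!(raw, 0);
    }
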
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 8ac6c1e3b2a8..da65deb296cd 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -61,17 +61,18 @@ use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::assume;
use core::iter;
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
use crate::alloc::{Allocator, Global};
+#[cfg(not(no_borrow))]
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::collections::TryReserveError;
@@ -94,6 +95,7 @@ pub use self::drain::Drain;
mod drain;
+#[cfg(not(no_borrow))]
#[cfg(not(no_global_oom_handling))]
mod cow;
@@ -120,14 +122,12 @@ use self::spec_from_elem::SpecFromElem;
#[cfg(not(no_global_oom_handling))]
mod spec_from_elem;
-#[cfg(not(no_global_oom_handling))]
use self::set_len_on_drop::SetLenOnDrop;
-#[cfg(not(no_global_oom_handling))]
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::InPlaceDrop;
+use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -147,7 +147,8 @@ mod spec_from_iter;
#[cfg(not(no_global_oom_handling))]
use self::spec_extend::SpecExtend;
-#[cfg(not(no_global_oom_handling))]
+use self::spec_extend::TrySpecExtend;
+
mod spec_extend;
/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
@@ -427,17 +428,25 @@ impl<T> Vec<T> {
Vec { buf: RawVec::NEW, len: 0 }
}
- /// Constructs a new, empty `Vec<T>` with the specified capacity.
+ /// Constructs a new, empty `Vec<T>` with at least the specified capacity.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Panics
///
@@ -450,19 +459,24 @@ impl<T> Vec<T> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
/// assert_eq!(vec.len(), 11);
/// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<()>::with_capacity(10);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
@@ -472,17 +486,25 @@ impl<T> Vec<T> {
Self::with_capacity_in(capacity, Global)
}
- /// Tries to construct a new, empty `Vec<T>` with the specified capacity.
+ /// Tries to construct a new, empty `Vec<T>` with at least the specified capacity.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Examples
///
@@ -491,22 +513,24 @@ impl<T> Vec<T> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
/// assert_eq!(vec.len(), 11);
/// assert!(vec.capacity() >= 11);
///
- /// let mut result = Vec::try_with_capacity(usize::MAX);
- /// assert!(result.is_err());
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<()>::try_with_capacity(10).unwrap();
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[inline]
#[stable(feature = "kernel", since = "1.0.0")]
@@ -514,15 +538,13 @@ impl<T> Vec<T> {
Self::try_with_capacity_in(capacity, Global)
}
- /// Creates a `Vec<T>` directly from the raw components of another vector.
+ /// Creates a `Vec<T>` directly from a pointer, a capacity, and a length.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
- /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
- /// (at least, it's highly likely to be incorrect if it wasn't).
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -531,6 +553,14 @@ impl<T> Vec<T> {
/// to be the same size as the pointer was allocated with. (Because similar to
/// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is normally **not** safe
@@ -573,8 +603,8 @@ impl<T> Vec<T> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -582,6 +612,32 @@ impl<T> Vec<T> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{AllocError, Allocator, Global, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ ///
+ /// let vec = unsafe {
+ /// let mem = match Global.allocate(layout) {
+ /// Ok(mem) => mem.cast::<u32>().as_ptr(),
+ /// Err(AllocError) => return,
+ /// };
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts_in(mem, 1, 16, Global)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
@@ -610,18 +666,26 @@ impl<T, A: Allocator> Vec<T, A> {
Vec { buf: RawVec::new_in(alloc), len: 0 }
}
- /// Constructs a new, empty `Vec<T, A>` with the specified capacity with the provided
- /// allocator.
+ /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
+ /// with the provided allocator.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Panics
///
@@ -651,6 +715,11 @@ impl<T, A: Allocator> Vec<T, A> {
/// vec.push(11);
/// assert_eq!(vec.len(), 11);
/// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
@@ -659,18 +728,26 @@ impl<T, A: Allocator> Vec<T, A> {
Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
}
- /// Tries to construct a new, empty `Vec<T, A>` with the specified capacity
+ /// Tries to construct a new, empty `Vec<T, A>` with at least the specified capacity
/// with the provided allocator.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Examples
///
@@ -697,8 +774,10 @@ impl<T, A: Allocator> Vec<T, A> {
/// assert_eq!(vec.len(), 11);
/// assert!(vec.capacity() >= 11);
///
- /// let mut result = Vec::try_with_capacity_in(usize::MAX, System);
- /// assert!(result.is_err());
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<(), System>::try_with_capacity_in(10, System).unwrap();
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[inline]
#[stable(feature = "kernel", since = "1.0.0")]
@@ -706,21 +785,30 @@ impl<T, A: Allocator> Vec<T, A> {
Ok(Vec { buf: RawVec::try_with_capacity_in(capacity, alloc)?, len: 0 })
}
- /// Creates a `Vec<T, A>` directly from the raw components of another vector.
+ /// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
+ /// and an allocator.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
- /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
- /// (at least, it's highly likely to be incorrect if it wasn't).
- /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
/// allocated and deallocated with the same layout.)
+ /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
- /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is **not** safe
@@ -738,6 +826,7 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// [`String`]: crate::string::String
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ /// [*fit*]: crate::alloc::Allocator#memory-fitting
///
/// # Examples
///
@@ -767,8 +856,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -776,6 +865,29 @@ impl<T, A: Allocator> Vec<T, A> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ /// let vec = unsafe {
+ /// let mem = alloc(layout).cast::<u32>();
+ /// if mem.is_null() {
+ /// return;
+ /// }
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts(mem, 1, 16)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
@@ -868,13 +980,14 @@ impl<T, A: Allocator> Vec<T, A> {
(ptr, len, capacity, alloc)
}
- /// Returns the number of elements the vector can hold without
+ /// Returns the total number of elements the vector can hold without
/// reallocating.
///
/// # Examples
///
/// ```
- /// let vec: Vec<i32> = Vec::with_capacity(10);
+ /// let mut vec: Vec<i32> = Vec::with_capacity(10);
+ /// vec.push(42);
/// assert_eq!(vec.capacity(), 10);
/// ```
#[inline]
@@ -884,10 +997,10 @@ impl<T, A: Allocator> Vec<T, A> {
}
/// Reserves capacity for at least `additional` more elements to be inserted
- /// in the given `Vec<T>`. The collection may reserve more space to avoid
- /// frequent reallocations. After calling `reserve`, capacity will be
- /// greater than or equal to `self.len() + additional`. Does nothing if
- /// capacity is already sufficient.
+ /// in the given `Vec<T>`. The collection may reserve more space to
+ /// speculatively avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
///
/// # Panics
///
@@ -906,10 +1019,12 @@ impl<T, A: Allocator> Vec<T, A> {
self.buf.reserve(self.len, additional);
}
- /// Reserves the minimum capacity for exactly `additional` more elements to
- /// be inserted in the given `Vec<T>`. After calling `reserve_exact`,
- /// capacity will be greater than or equal to `self.len() + additional`.
- /// Does nothing if the capacity is already sufficient.
+ /// Reserves the minimum capacity for at least `additional` more elements to
+ /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
///
/// Note that the allocator may give the collection more space than it
/// requests. Therefore, capacity can not be relied upon to be precisely
@@ -935,10 +1050,11 @@ impl<T, A: Allocator> Vec<T, A> {
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
- /// in the given `Vec<T>`. The collection may reserve more space to avoid
+ /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
- /// greater than or equal to `self.len() + additional`. Does nothing if
- /// capacity is already sufficient.
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -970,10 +1086,11 @@ impl<T, A: Allocator> Vec<T, A> {
self.buf.try_reserve(self.len, additional)
}
- /// Tries to reserve the minimum capacity for exactly `additional`
- /// elements to be inserted in the given `Vec<T>`. After calling
- /// `try_reserve_exact`, capacity will be greater than or equal to
- /// `self.len() + additional` if it returns `Ok(())`.
+ /// Tries to reserve the minimum capacity for at least `additional`
+ /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
+ /// this will not deliberately over-allocate to speculatively avoid frequent
+ /// allocations. After calling `try_reserve_exact`, capacity will be greater
+ /// than or equal to `self.len() + additional` if it returns `Ok(())`.
/// Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it
@@ -1198,7 +1315,8 @@ impl<T, A: Allocator> Vec<T, A> {
self
}
- /// Returns a raw pointer to the vector's buffer.
+ /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer
+ /// valid for zero sized reads if the vector didn't allocate.
///
/// The caller must ensure that the vector outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
@@ -1235,7 +1353,8 @@ impl<T, A: Allocator> Vec<T, A> {
ptr
}
- /// Returns an unsafe mutable pointer to the vector's buffer.
+ /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
+ /// raw pointer valid for zero sized reads if the vector didn't allocate.
///
/// The caller must ensure that the vector outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
@@ -1439,9 +1558,6 @@ impl<T, A: Allocator> Vec<T, A> {
}
let len = self.len();
- if index > len {
- assert_failed(index, len);
- }
// space for the new element
if len == self.buf.capacity() {
@@ -1453,9 +1569,15 @@ impl<T, A: Allocator> Vec<T, A> {
// The spot to put the new value
{
let p = self.as_mut_ptr().add(index);
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
+ if index < len {
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.add(1), len - index);
+ } else if index == len {
+ // No elements need shifting.
+ } else {
+ assert_failed(index, len);
+ }
// Write it in, overwriting the first copy of the `index`th
// element.
ptr::write(p, element);
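
The rewritten branch folds the bounds check into the shift logic; the observable `Vec::insert` semantics are unchanged across the three cases:

    fn main() {
        let mut v = vec![1, 2, 4];
        v.insert(2, 3);     // index < len: shifts the tail right by one
        v.insert(4, 5);     // index == len: nothing to shift, plain append
        assert_eq!(v, [1, 2, 3, 4, 5]);
        // v.insert(7, 9);  // index > len: panics via `assert_failed`
    }
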
@@ -1512,7 +1634,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
- ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@@ -1561,11 +1683,11 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// ```
/// let mut vec = vec![1, 2, 3, 4];
- /// vec.retain_mut(|x| if *x > 3 {
- /// false
- /// } else {
+ /// vec.retain_mut(|x| if *x <= 3 {
/// *x += 1;
/// true
+ /// } else {
+ /// false
/// });
/// assert_eq!(vec, [2, 3, 4]);
/// ```
@@ -1853,6 +1975,51 @@ impl<T, A: Allocator> Vec<T, A> {
Ok(())
}
+ /// Appends an element if there is sufficient spare capacity, otherwise an error is returned
+ /// with the element.
+ ///
+ /// Unlike [`push`] this method will not reallocate when there's insufficient capacity.
+ /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity.
+ ///
+ /// [`push`]: Vec::push
+ /// [`reserve`]: Vec::reserve
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Examples
+ ///
+ /// A manual, panic-free alternative to [`FromIterator`]:
+ ///
+ /// ```
+ /// #![feature(vec_push_within_capacity)]
+ ///
+ /// use std::collections::TryReserveError;
+ /// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> {
+ /// let mut vec = Vec::new();
+ /// for value in iter {
+ /// if let Err(value) = vec.push_within_capacity(value) {
+ /// vec.try_reserve(1)?;
+ /// // this cannot fail, the previous line either returned or added at least 1 free slot
+ /// let _ = vec.push_within_capacity(value);
+ /// }
+ /// }
+ /// Ok(vec)
+ /// }
+ /// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100)));
+ /// ```
+ #[inline]
+ #[unstable(feature = "vec_push_within_capacity", issue = "100486")]
+ pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> {
+ if self.len == self.buf.capacity() {
+ return Err(value);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+
/// Removes the last element from a vector and returns it, or [`None`] if it
/// is empty.
///
@@ -1885,7 +2052,7 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// # Panics
///
- /// Panics if the number of elements in the vector overflows a `usize`.
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Examples
///
@@ -1917,6 +2084,17 @@ impl<T, A: Allocator> Vec<T, A> {
self.len += count;
}
+ /// Tries to append elements to `self` from another buffer.
+ #[inline]
+ unsafe fn try_append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
+ let count = unsafe { (*other).len() };
+ self.try_reserve(count)?;
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ Ok(())
+ }
+
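In safe userspace terms, `try_append_elements` amounts to the following for `T: Copy` (a rough sketch against std; the real helper writes through raw pointers and also backs `try_extend_from_slice` below):

    use std::collections::TryReserveError;

    fn try_append_copy<T: Copy>(v: &mut Vec<T>, other: &[T]) -> Result<(), TryReserveError> {
        v.try_reserve(other.len())?;  // the only fallible step
        v.extend_from_slice(other);   // cannot reallocate after the reserve
        Ok(())
    }
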
/// Removes the specified range from the vector in bulk, returning all
/// removed elements as an iterator. If the iterator is dropped before
/// being fully consumed, it drops the remaining removed elements.
@@ -1968,9 +2146,7 @@ impl<T, A: Allocator> Vec<T, A> {
unsafe {
// set self.vec length's to start, to be safe in case Drain is leaked
self.set_len(start);
- // Use the borrow in the IterMut to indicate borrowing behavior of the
- // whole Drain iterator (like &mut T).
- let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
+ let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
Drain {
tail_start: end,
tail_len: len - end,
@@ -2162,7 +2338,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// static_ref[0] += 1;
/// assert_eq!(static_ref, &[2, 2, 3]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_leak", since = "1.47.0")]
#[inline]
pub fn leak<'a>(self) -> &'a mut [T]
@@ -2338,6 +2513,45 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
}
}
+ /// Tries to resize the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ /// If you only need to resize to a smaller size, use [`Vec::truncate`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.try_resize(3, "world").unwrap();
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.try_resize(2, 0).unwrap();
+ /// assert_eq!(vec, [1, 2]);
+ ///
+ /// let mut vec = vec![42];
+ /// let result = vec.try_resize(usize::MAX, 0);
+ /// assert!(result.is_err());
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveError> {
+ let len = self.len();
+
+ if new_len > len {
+ self.try_extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ Ok(())
+ }
+ }
+
/// Clones and appends all elements in a slice to the `Vec`.
///
/// Iterates over the slice `other`, clones each element, and then appends
@@ -2363,6 +2577,30 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.spec_extend(other.iter())
}
+ /// Tries to clone and append all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` slice is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`], except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.try_extend_from_slice(&[2, 3, 4]).unwrap();
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), TryReserveError> {
+ self.try_spec_extend(other.iter())
+ }
+
/// Copies elements from `src` range to the end of the vector.
///
/// # Panics
@@ -2426,7 +2664,7 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
#[unstable(feature = "slice_flatten", issue = "95629")]
pub fn into_flattened(self) -> Vec<T, A> {
let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
- let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
+ let (new_len, new_cap) = if T::IS_ZST {
(len.checked_mul(N).expect("vec len overflow"), usize::MAX)
} else {
// SAFETY:
@@ -2488,7 +2726,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}
@@ -2502,6 +2740,36 @@ impl<T, A: Allocator> Vec<T, A> {
// len set by scope guard
}
}
+
+ /// Try to extend the vector by `n` values, using the given generator.
+ fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Result<(), TryReserveError> {
+ self.try_reserve(n)?;
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around a bug where the compiler
+ // might not realize that the stores through `ptr` and through
+ // `self.set_len()` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.add(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ Ok(())
+ }
+ }
}
impl<T: PartialEq, A: Allocator> Vec<T, A> {
@@ -2584,7 +2852,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
// SAFETY:
// - Both pointers are created from unique slice references (`&mut [_]`)
// so they are valid and do not overlap.
- // - Elements are :Copy so it's OK to to copy them, without doing
+ // - Elements are :Copy so it's OK to copy them, without doing
// anything with the original values
// - `count` is equal to the len of `source`, so source is valid for
// `count` reads
@@ -2607,6 +2875,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
impl<T, A: Allocator> ops::Deref for Vec<T, A> {
type Target = [T];
+ #[inline]
fn deref(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
}
@@ -2614,6 +2883,7 @@ impl<T, A: Allocator> ops::Deref for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
+ #[inline]
fn deref_mut(&mut self) -> &mut [T] {
unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
}
@@ -2740,10 +3010,13 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
///
/// ```
/// let v = vec!["a".to_string(), "b".to_string()];
- /// for s in v.into_iter() {
- /// // s has type String, not &String
- /// println!("{s}");
- /// }
+ /// let mut v_iter = v.into_iter();
+ ///
+ /// let first_element: Option<String> = v_iter.next();
+ ///
+ /// assert_eq!(first_element, Some("a".to_string()));
+ /// assert_eq!(v_iter.next(), Some("b".to_string()));
+ /// assert_eq!(v_iter.next(), None);
/// ```
#[inline]
fn into_iter(self) -> IntoIter<T, A> {
@@ -2751,8 +3024,8 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
let mut me = ManuallyDrop::new(self);
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
let begin = me.as_mut_ptr();
- let end = if mem::size_of::<T>() == 0 {
- arith_offset(begin as *const i8, me.len() as isize) as *const T
+ let end = if T::IS_ZST {
+ begin.wrapping_byte_add(me.len())
} else {
begin.add(me.len()) as *const T
};
@@ -2836,6 +3109,34 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ fn try_extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
+ self.try_reserve(lower.saturating_add(1))?;
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+
+ Ok(())
+ }
+
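The same fallible-extend pattern, restated against the public std API for comparison (a sketch; the in-tree version writes through raw pointers to avoid the per-push capacity branch):

    use std::collections::TryReserveError;

    fn try_extend<T>(
        v: &mut Vec<T>,
        mut iter: impl Iterator<Item = T>,
    ) -> Result<(), TryReserveError> {
        while let Some(item) = iter.next() {
            if v.len() == v.capacity() {
                let (lower, _) = iter.size_hint();
                v.try_reserve(lower.saturating_add(1))?;
            }
            v.push(item); // cannot allocate: capacity was just ensured
        }
        Ok(())
    }
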
/// Creates a splicing iterator that replaces the specified range in the vector
/// with the given `replace_with` iterator and yields the removed items.
/// `replace_with` does not need to be the same length as `range`.
@@ -3002,6 +3303,8 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for Vec<T> {
/// Creates an empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
fn default() -> Vec<T> {
Vec::new()
}
@@ -3094,15 +3397,19 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// ```
#[cfg(not(test))]
fn from(s: [T; N]) -> Vec<T> {
- <[T]>::into_vec(box s)
+ <[T]>::into_vec(
+ #[rustc_box]
+ Box::new(s),
+ )
}
#[cfg(test)]
fn from(s: [T; N]) -> Vec<T> {
- crate::slice::into_vec(box s)
+ crate::slice::into_vec(Box::new(s))
}
}
+#[cfg(not(no_borrow))]
#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
where
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
new file mode 100644
index 000000000000..8b66bc812129
--- /dev/null
+++ b/rust/alloc/vec/set_len_on_drop.rs
@@ -0,0 +1,28 @@
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is: the length field in `SetLenOnDrop` is a local variable
+// that the optimizer can see does not alias with any stores through the
+// Vec's data pointer. This is a workaround for alias analysis issue #32155.
+pub(super) struct SetLenOnDrop<'a> {
+ len: &'a mut usize,
+ local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+ #[inline]
+ pub(super) fn new(len: &'a mut usize) -> Self {
+ SetLenOnDrop { local_len: *len, len }
+ }
+
+ #[inline]
+ pub(super) fn increment_len(&mut self, increment: usize) {
+ self.local_len += increment;
+ }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ *self.len = self.local_len;
+ }
+}
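
A standalone copy of the guard, driven to show why the pattern is panic-safe (sketch; `catch_unwind` stands in for an element constructor that panics mid-loop):

    struct SetLenOnDrop<'a> { len: &'a mut usize, local_len: usize }

    impl<'a> SetLenOnDrop<'a> {
        fn new(len: &'a mut usize) -> Self { SetLenOnDrop { local_len: *len, len } }
        fn increment_len(&mut self, by: usize) { self.local_len += by; }
    }

    impl Drop for SetLenOnDrop<'_> {
        fn drop(&mut self) { *self.len = self.local_len; }
    }

    fn main() {
        let mut len = 0usize;
        let caught = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            let mut guard = SetLenOnDrop::new(&mut len);
            guard.increment_len(2);
            panic!("element constructor panicked");
        }));
        assert!(caught.is_err());
        assert_eq!(len, 2); // the guard's Drop ran during unwind
    }
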
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
new file mode 100644
index 000000000000..73e69fec87f5
--- /dev/null
+++ b/rust/alloc/vec/spec_extend.rs
@@ -0,0 +1,170 @@
+use crate::alloc::Allocator;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
+use core::iter::TrustedLen;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SetLenOnDrop, Vec};
+
+// Specialization trait used for Vec::extend
+#[cfg(not(no_global_oom_handling))]
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+// Specialization trait used for Vec::try_extend
+pub(super) trait TrySpecExtend<T, I> {
+ fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter)
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
+ self.try_extend_desugared(iter)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.add(1);
+ // Since the loop executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.try_reserve(additional)?;
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.add(1);
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ Ok(())
+ } else {
+ Err(TryReserveErrorKind::CapacityOverflow.into())
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+ unsafe {
+ self.append_elements(iterator.as_slice() as _);
+ }
+ iterator.forget_remaining_elements();
+ }
+}
+
+impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
+ unsafe {
+ self.try_append_elements(iterator.as_slice() as _)?;
+ }
+ iterator.ptr = iterator.end;
+ Ok(())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.cloned())
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ self.try_spec_extend(iterator.cloned())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ unsafe { self.append_elements(slice) };
+ }
+}
+
+impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
+ let slice = iterator.as_slice();
+ unsafe { self.try_append_elements(slice) }
+ }
+}
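
The `default fn` markers above are rustc's `min_specialization` feature at work; a toy version of the same dispatch shape (nightly-only sketch, names invented):

    #![feature(min_specialization)]

    trait Describe {
        fn describe(&self) -> &'static str;
    }

    impl<T> Describe for T {
        default fn describe(&self) -> &'static str {
            "general fallback" // like the plain-Iterator impls above
        }
    }

    impl Describe for u32 {
        fn describe(&self) -> &'static str {
            "specialized" // like the TrustedLen / slice::Iter impls
        }
    }

    fn main() {
        assert_eq!("hi".describe(), "general fallback");
        assert_eq!(7u32.describe(), "specialized");
    }
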
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index c48bc284214a..b3ef609d0947 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -6,8 +6,59 @@
* Sorted alphabetically.
*/
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/iosys-map.h>
+#include <linux/io-pgtable.h>
+#include <linux/ktime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/sysctl.h>
+#include <linux/timekeeping.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/ioctl.h>
+#include <uapi/drm/asahi_drm.h>
+#include <uapi/drm/drm.h>
/* `bindgen` gets confused at certain things. */
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
+
+const gfp_t BINDINGS_XA_FLAGS_LOCK_IRQ = XA_FLAGS_LOCK_IRQ;
+const gfp_t BINDINGS_XA_FLAGS_LOCK_BH = XA_FLAGS_LOCK_BH;
+const gfp_t BINDINGS_XA_FLAGS_TRACK_FREE = XA_FLAGS_TRACK_FREE;
+const gfp_t BINDINGS_XA_FLAGS_ZERO_BUSY = XA_FLAGS_ZERO_BUSY;
+const gfp_t BINDINGS_XA_FLAGS_ALLOC_WRAPPED = XA_FLAGS_ALLOC_WRAPPED;
+const gfp_t BINDINGS_XA_FLAGS_ACCOUNT = XA_FLAGS_ACCOUNT;
+const gfp_t BINDINGS_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
+const gfp_t BINDINGS_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
+
+const xa_mark_t BINDINGS_XA_MARK_0 = XA_MARK_0;
+const xa_mark_t BINDINGS_XA_MARK_1 = XA_MARK_1;
+const xa_mark_t BINDINGS_XA_MARK_2 = XA_MARK_2;
+const xa_mark_t BINDINGS_XA_PRESENT = XA_PRESENT;
+const xa_mark_t BINDINGS_XA_MARK_MAX = XA_MARK_MAX;
+const xa_mark_t BINDINGS_XA_FREE_MARK = XA_FREE_MARK;
+
+const __poll_t BINDINGS_EPOLLIN = EPOLLIN;
+const __poll_t BINDINGS_EPOLLOUT = EPOLLOUT;
+const __poll_t BINDINGS_EPOLLERR = EPOLLERR;
+const __poll_t BINDINGS_EPOLLHUP = EPOLLHUP;
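
These `BINDINGS_*` shims exist because the underlying values are C macros with casts, which bindgen cannot turn into Rust constants; only the typed `const` wrappers survive. On the Rust side they are then re-exported under the plain names, in the style of the existing `GFP_KERNEL` re-export (sketch; exact binding names assumed):

    // rust/bindings/lib.rs style:
    pub const XA_FLAGS_LOCK_IRQ: gfp_t = BINDINGS_XA_FLAGS_LOCK_IRQ;
    pub const EPOLLIN: __poll_t = BINDINGS_EPOLLIN;
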
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 6c50ee62c56b..9bcbea04dac3 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -9,7 +9,6 @@
//! using this crate.
#![no_std]
-#![feature(core_ffi_c)]
// See <https://github.com/rust-lang/rust-bindgen/issues/1651>.
#![cfg_attr(test, allow(deref_nullptr))]
#![cfg_attr(test, allow(unaligned_references))]
@@ -41,6 +40,7 @@ mod bindings_raw {
#[allow(dead_code)]
mod bindings_helper {
// Import the generated bindings for types.
+ use super::bindings_raw::*;
include!(concat!(
env!("OBJTREE"),
"/rust/bindings/bindings_helpers_generated.rs"
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index f8f39a3e6855..43378357ece9 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -28,7 +28,7 @@ macro_rules! define_panicking_intrinsics(
($reason: tt, { $($ident: ident, )* }) => {
$(
#[doc(hidden)]
- #[no_mangle]
+ #[export_name = concat!("__rust", stringify!($ident))]
pub extern "C" fn $ident() {
panic!($reason);
}
@@ -61,3 +61,6 @@ define_panicking_intrinsics!("`u128` should not be used", {
__udivti3,
__umodti3,
});
+
+// NOTE: if you are adding a new intrinsic here, you should also add it to
+// `redirect-intrinsics` in `rust/Makefile`.
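
Concretely, the `export_name` change makes the macro emit prefixed symbols, e.g. for `__umodti3` (expansion sketch), leaving the real intrinsic names free for the Makefile's `redirect-intrinsics` machinery to rewire:

    #[doc(hidden)]
    #[export_name = "__rust__umodti3"]
    pub extern "C" fn __umodti3() {
        panic!("`u128` should not be used");
    }
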
diff --git a/rust/helpers.c b/rust/helpers.c
index b4f15eee2ffd..e023b1265b58 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -18,8 +18,25 @@
* accidentally exposed.
*/
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_syncobj.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errname.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/xarray.h>
__noreturn void rust_helper_BUG(void)
{
@@ -27,6 +44,504 @@ __noreturn void rust_helper_BUG(void)
}
EXPORT_SYMBOL_GPL(rust_helper_BUG);
+refcount_t rust_helper_REFCOUNT_INIT(int n)
+{
+ return (refcount_t)REFCOUNT_INIT(n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
+
+__force void *rust_helper_ERR_PTR(long err)
+{
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR);
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
+EXPORT_SYMBOL_GPL(rust_helper_errname);
+
+void rust_helper_xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+ xa_init_flags(xa, flags);
+}
+EXPORT_SYMBOL_GPL(rust_helper_xa_init_flags);
+
+bool rust_helper_xa_empty(struct xarray *xa)
+{
+ return xa_empty(xa);
+}
+EXPORT_SYMBOL_GPL(rust_helper_xa_empty);
+
+int rust_helper_xa_alloc(struct xarray *xa, u32 *id, void *entry, struct xa_limit limit, gfp_t gfp)
+{
+ return xa_alloc(xa, id, entry, limit, gfp);
+}
+EXPORT_SYMBOL_GPL(rust_helper_xa_alloc);
+
+void rust_helper_xa_lock(struct xarray *xa)
+{
+ xa_lock(xa);
+}
+EXPORT_SYMBOL_GPL(rust_helper_xa_lock);
+
+void rust_helper_xa_unlock(struct xarray *xa)
+{
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL_GPL(rust_helper_xa_unlock);
+
+int rust_helper_xa_err(void *entry)
+{
+ return xa_err(entry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_xa_err);
+
+void *rust_helper_dev_get_drvdata(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dev_get_drvdata);
+
+const char *rust_helper_dev_name(const struct device *dev)
+{
+ return dev_name(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dev_name);
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_current);
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_wait);
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
+
+unsigned long rust_helper_clear_user(void __user *to, unsigned long n)
+{
+ return clear_user(to, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_clear_user);
+
+void rust_helper_rcu_read_lock(void)
+{
+ rcu_read_lock();
+}
+EXPORT_SYMBOL_GPL(rust_helper_rcu_read_lock);
+
+void rust_helper_rcu_read_unlock(void)
+{
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(rust_helper_rcu_read_unlock);
+
+void rust_helper_synchronize_rcu(void)
+{
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(rust_helper_synchronize_rcu);
+
+void __iomem *rust_helper_ioremap(resource_size_t offset, unsigned long size)
+{
+ return ioremap(offset, size);
+}
+EXPORT_SYMBOL_GPL(rust_helper_ioremap);
+
+void __iomem *rust_helper_ioremap_np(resource_size_t offset, unsigned long size)
+{
+ return ioremap_np(offset, size);
+}
+EXPORT_SYMBOL_GPL(rust_helper_ioremap_np);
+
+u8 rust_helper_readb(const volatile void __iomem *addr)
+{
+ return readb(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readb);
+
+u16 rust_helper_readw(const volatile void __iomem *addr)
+{
+ return readw(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readw);
+
+u32 rust_helper_readl(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readl);
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readq);
+#endif
+
+void rust_helper_writeb(u8 value, volatile void __iomem *addr)
+{
+ writeb(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeb);
+
+void rust_helper_writew(u16 value, volatile void __iomem *addr)
+{
+ writew(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writew);
+
+void rust_helper_writel(u32 value, volatile void __iomem *addr)
+{
+ writel(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writel);
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq(u64 value, volatile void __iomem *addr)
+{
+ writeq(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeq);
+#endif
+
+u8 rust_helper_readb_relaxed(const volatile void __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readb_relaxed);
+
+u16 rust_helper_readw_relaxed(const volatile void __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readw_relaxed);
+
+u32 rust_helper_readl_relaxed(const volatile void __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readl_relaxed);
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq_relaxed(const volatile void __iomem *addr)
+{
+ return readq_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readq_relaxed);
+#endif
+
+void rust_helper_writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ writeb_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeb_relaxed);
+
+void rust_helper_writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ writew_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writew_relaxed);
+
+void rust_helper_writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ writel_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writel_relaxed);
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ writeq_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeq_relaxed);
+#endif
+
+void rust_helper_memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+ memcpy_fromio(to, from, count);
+}
+EXPORT_SYMBOL_GPL(rust_helper_memcpy_fromio);
+
+void *rust_helper_platform_get_drvdata(const struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_platform_get_drvdata);
+
+void rust_helper_platform_set_drvdata(struct platform_device *pdev, void *data)
+{
+ platform_set_drvdata(pdev, data);
+}
+EXPORT_SYMBOL_GPL(rust_helper_platform_set_drvdata);
+
+const struct of_device_id *rust_helper_of_match_device(
+ const struct of_device_id *matches, const struct device *dev)
+{
+ return of_match_device(matches, dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_of_match_device);
+
+bool rust_helper_of_node_is_root(const struct device_node *np)
+{
+ return of_node_is_root(np);
+}
+EXPORT_SYMBOL_GPL(rust_helper_of_node_is_root);
+
+struct device_node *rust_helper_of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ return of_parse_phandle(np, phandle_name, index);
+}
+EXPORT_SYMBOL_GPL(rust_helper_of_parse_phandle);
+
+int rust_helper_dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ return dma_set_mask_and_coherent(dev, mask);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dma_set_mask_and_coherent);
+
+resource_size_t rust_helper_resource_size(const struct resource *res)
+{
+ return resource_size(res);
+}
+EXPORT_SYMBOL_GPL(rust_helper_resource_size);
+
+void rust_helper_init_completion(struct completion *c)
+{
+ init_completion(c);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_completion);
+
+dma_addr_t rust_helper_sg_dma_address(const struct scatterlist *sg)
+{
+ return sg_dma_address(sg);
+}
+EXPORT_SYMBOL_GPL(rust_helper_sg_dma_address);
+
+int rust_helper_sg_dma_len(const struct scatterlist *sg)
+{
+ return sg_dma_len(sg);
+}
+EXPORT_SYMBOL_GPL(rust_helper_sg_dma_len);
+
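+/*
+ * The C `spin_lock_init()` macro creates a static lock class key per call
+ * site, which does not work for locks created dynamically from Rust. This
+ * helper therefore takes the name and lock class key from the caller and
+ * open-codes the lockdep and PREEMPT_RT variants of the initialisation.
+ */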
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+# ifndef CONFIG_PREEMPT_RT
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+# else
+ rt_mutex_base_init(&lock->lock);
+ __rt_spin_lock_init(lock, name, key, false);
+# endif
+#else
+ spin_lock_init(lock);
+#endif
+}
+EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
+
+unsigned long rust_helper_msecs_to_jiffies(const unsigned int m)
+{
+ return msecs_to_jiffies(m);
+}
+EXPORT_SYMBOL_GPL(rust_helper_msecs_to_jiffies);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+
+void rust_helper_dma_fence_get(struct dma_fence *fence)
+{
+ dma_fence_get(fence);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dma_fence_get);
+
+void rust_helper_dma_fence_put(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dma_fence_put);
+
+struct dma_fence_chain *rust_helper_dma_fence_chain_alloc(void)
+{
+ return dma_fence_chain_alloc();
+}
+EXPORT_SYMBOL_GPL(rust_helper_dma_fence_chain_alloc);
+
+void rust_helper_dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ dma_fence_chain_free(chain);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dma_fence_chain_free);
+
+void rust_helper_dma_fence_set_error(struct dma_fence *fence, int error)
+{
+ dma_fence_set_error(fence, error);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dma_fence_set_error);
+
+#endif
+
+#ifdef CONFIG_DRM
+
+void rust_helper_drm_gem_object_get(struct drm_gem_object *obj)
+{
+ drm_gem_object_get(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_object_get);
+
+void rust_helper_drm_gem_object_put(struct drm_gem_object *obj)
+{
+ drm_gem_object_put(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_object_put);
+
+__u64 rust_helper_drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+ return drm_vma_node_offset_addr(node);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_vma_node_offset_addr);
+
+void rust_helper_drm_syncobj_get(struct drm_syncobj *obj)
+{
+ drm_syncobj_get(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_syncobj_get);
+
+void rust_helper_drm_syncobj_put(struct drm_syncobj *obj)
+{
+ drm_syncobj_put(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_syncobj_put);
+
+struct dma_fence *rust_helper_drm_syncobj_fence_get(struct drm_syncobj *syncobj)
+{
+ return drm_syncobj_fence_get(syncobj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_syncobj_fence_get);
+
+#ifdef CONFIG_DRM_GEM_SHMEM_HELPER
+
+void rust_helper_drm_gem_shmem_object_free(struct drm_gem_object *obj)
+{
+	drm_gem_shmem_object_free(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_free);
+
+void rust_helper_drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ drm_gem_shmem_object_print_info(p, indent, obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_print_info);
+
+int rust_helper_drm_gem_shmem_object_pin(struct drm_gem_object *obj)
+{
+ return drm_gem_shmem_object_pin(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_pin);
+
+void rust_helper_drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
+{
+ drm_gem_shmem_object_unpin(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_unpin);
+
+struct sg_table *rust_helper_drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ return drm_gem_shmem_object_get_sg_table(obj);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_get_sg_table);
+
+int rust_helper_drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ return drm_gem_shmem_object_vmap(obj, map);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_vmap);
+
+void rust_helper_drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ drm_gem_shmem_object_vunmap(obj, map);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_vunmap);
+
+int rust_helper_drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ return drm_gem_shmem_object_mmap(obj, vma);
+}
+EXPORT_SYMBOL_GPL(rust_helper_drm_gem_shmem_object_mmap);
+
+#endif
+#endif
+
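+/*
+ * `mutex_lock()`/`mutex_unlock()` only need helpers when they are defined as
+ * macros (for example, under CONFIG_DEBUG_LOCK_ALLOC), since `bindgen` cannot
+ * generate bindings for macros; otherwise, the plain functions are bound
+ * directly.
+ */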
+#ifdef mutex_lock
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+	mutex_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
+#endif
+
+#ifdef mutex_unlock
+void rust_helper_mutex_unlock(struct mutex *lock)
+{
+	mutex_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mutex_unlock);
+#endif
+
/*
* We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
* as the Rust `usize` type, so we can use it in contexts where Rust
diff --git a/rust/kernel/build_assert.rs b/rust/kernel/build_assert.rs
index 659542393c09..970980827214 100644
--- a/rust/kernel/build_assert.rs
+++ b/rust/kernel/build_assert.rs
@@ -67,6 +67,7 @@ macro_rules! build_error {
/// assert!(n > 1); // Run-time check
/// }
/// ```
+#[allow(rustdoc::broken_intra_doc_links)]
#[macro_export]
macro_rules! build_assert {
($cond:expr $(,)?) => {{
diff --git a/rust/kernel/delay.rs b/rust/kernel/delay.rs
new file mode 100644
index 000000000000..1e987fa65941
--- /dev/null
+++ b/rust/kernel/delay.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Delay functions for operations like sleeping.
+//!
+//! C header: [`include/linux/delay.h`](../../../../include/linux/delay.h)
+
+use crate::bindings;
+use core::{cmp::min, time::Duration};
+
+const MILLIS_PER_SEC: u64 = 1_000;
+
+fn coarse_sleep_conversion(duration: Duration) -> core::ffi::c_uint {
+ let milli_as_nanos = Duration::MILLISECOND.subsec_nanos();
+
+ // Rounds the nanosecond component of `duration` up to the nearest millisecond.
+ let nanos_as_millis = duration.subsec_nanos().wrapping_add(milli_as_nanos - 1) / milli_as_nanos;
+
+ // Saturates the second component of `duration` to `c_uint::MAX`.
+ let seconds_as_millis = min(
+ duration.as_secs().saturating_mul(MILLIS_PER_SEC),
+ u64::from(core::ffi::c_uint::MAX),
+ ) as core::ffi::c_uint;
+
+ seconds_as_millis.saturating_add(nanos_as_millis)
+}
+
+/// Sleeps safely even with waitqueue interruptions.
+///
+/// This function forwards the call to the C side `msleep` function. As a result,
+/// `duration` will be rounded up to the nearest millisecond if granularity less
+/// than a millisecond is provided. Any [`Duration`] that exceeds
+/// [`c_uint::MAX`][core::ffi::c_uint::MAX] in milliseconds is saturated.
+///
+/// # Examples
+///
+// Keep these in sync with `test_coarse_sleep_examples`.
+/// ```
+/// # use core::time::Duration;
+/// # use kernel::delay::coarse_sleep;
+/// coarse_sleep(Duration::ZERO); // Equivalent to `msleep(0)`.
+/// coarse_sleep(Duration::from_nanos(1)); // Equivalent to `msleep(1)`.
+///
+/// coarse_sleep(Duration::from_nanos(1_000_000)); // Equivalent to `msleep(1)`.
+/// coarse_sleep(Duration::from_nanos(1_000_001)); // Equivalent to `msleep(2)`.
+/// coarse_sleep(Duration::from_nanos(1_999_999)); // Equivalent to `msleep(2)`.
+///
+/// coarse_sleep(Duration::from_millis(1)); // Equivalent to `msleep(1)`.
+/// coarse_sleep(Duration::from_millis(2)); // Equivalent to `msleep(2)`.
+///
+/// coarse_sleep(Duration::from_secs(1)); // Equivalent to `msleep(1000)`.
+/// coarse_sleep(Duration::new(1, 1)); // Equivalent to `msleep(1001)`.
+/// coarse_sleep(Duration::new(1, 2)); // Equivalent to `msleep(1001)`.
+/// ```
+pub fn coarse_sleep(duration: Duration) {
+ // SAFETY: `msleep` is safe for all values of its argument.
+ unsafe { bindings::msleep(coarse_sleep_conversion(duration)) }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{coarse_sleep_conversion, MILLIS_PER_SEC};
+ use core::time::Duration;
+
+ #[test]
+ fn test_coarse_sleep_examples() {
+ // Keep these in sync with `coarse_sleep`'s `# Examples` section.
+
+ assert_eq!(coarse_sleep_conversion(Duration::ZERO), 0);
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1)), 1);
+
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_000_000)), 1);
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_000_001)), 2);
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_999_999)), 2);
+
+ assert_eq!(coarse_sleep_conversion(Duration::from_millis(1)), 1);
+ assert_eq!(coarse_sleep_conversion(Duration::from_millis(2)), 2);
+
+ assert_eq!(coarse_sleep_conversion(Duration::from_secs(1)), 1000);
+ assert_eq!(coarse_sleep_conversion(Duration::new(1, 1)), 1001);
+ assert_eq!(coarse_sleep_conversion(Duration::new(1, 2)), 1001);
+ }
+
+ #[test]
+ fn test_coarse_sleep_saturation() {
+ assert!(
+ coarse_sleep_conversion(Duration::new(
+ core::ffi::c_uint::MAX as u64 / MILLIS_PER_SEC,
+ 0
+ )) < core::ffi::c_uint::MAX
+ );
+ assert_eq!(
+ coarse_sleep_conversion(Duration::new(
+ core::ffi::c_uint::MAX as u64 / MILLIS_PER_SEC,
+ 999_999_999
+ )),
+ core::ffi::c_uint::MAX
+ );
+
+ assert_eq!(
+ coarse_sleep_conversion(Duration::MAX),
+ core::ffi::c_uint::MAX
+ );
+ }
+}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
new file mode 100644
index 000000000000..c8b7d69b1426
--- /dev/null
+++ b/rust/kernel/device.rs
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic devices that are part of the kernel's driver model.
+//!
+//! C header: [`include/linux/device.h`](../../../../include/linux/device.h)
+
+use crate::{
+ bindings,
+ error::Result,
+ of,
+ revocable::{Revocable, RevocableGuard},
+ str::CStr,
+ sync::{LockClassKey, NeedsLockClass, RevocableMutex, RevocableMutexGuard, UniqueArc},
+};
+use core::{
+ fmt,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+};
+
+#[cfg(CONFIG_PRINTK)]
+use crate::c_str;
+
+/// A raw device.
+///
+/// # Safety
+///
+/// Implementers must ensure that the `*mut device` returned by [`RawDevice::raw_device`] is
+/// related to `self`, that is, actions on it will affect `self`. For example, if one calls
+/// `get_device`, then the refcount on the device represented by `self` will be incremented.
+///
+/// Additionally, implementers must ensure that the device is never renamed. Commit a5462516aa99
+/// ("driver-core: document restrictions on device_rename()") has details on why `device_rename`
+/// should not be used.
+pub unsafe trait RawDevice {
+ /// Returns the raw `struct device` related to `self`.
+ fn raw_device(&self) -> *mut bindings::device;
+
+ /// Returns the name of the device.
+ fn name(&self) -> &CStr {
+ let ptr = self.raw_device();
+
+ // SAFETY: `ptr` is valid because `self` keeps it alive.
+ let name = unsafe { bindings::dev_name(ptr) };
+
+ // SAFETY: The name of the device remains valid while it is alive (because the device is
+ // never renamed, per the safety requirement of this trait). This is guaranteed to be the
+ // case because the reference to `self` outlives the one of the returned `CStr` (enforced
+ // by the compiler because of their lifetimes).
+ unsafe { CStr::from_char_ptr(name) }
+ }
+
+    /// Gets the OpenFirmware node attached to this device, if any.
+ fn of_node(&self) -> Option<of::Node> {
+ let ptr = self.raw_device();
+
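+        // SAFETY: `ptr` is valid because `self` keeps it alive, and `of_node` is either
+        // null or a valid node pointer, both of which `get_from_raw` handles.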
+ unsafe { of::Node::get_from_raw((*ptr).of_node) }
+ }
+
+ /// Prints an emergency-level message (level 0) prefixed with device information.
+ ///
+ /// More details are available from [`dev_emerg`].
+ ///
+ /// [`dev_emerg`]: crate::dev_emerg
+ fn pr_emerg(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_EMERG, args) };
+ }
+
+ /// Prints an alert-level message (level 1) prefixed with device information.
+ ///
+ /// More details are available from [`dev_alert`].
+ ///
+ /// [`dev_alert`]: crate::dev_alert
+ fn pr_alert(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_ALERT, args) };
+ }
+
+ /// Prints a critical-level message (level 2) prefixed with device information.
+ ///
+ /// More details are available from [`dev_crit`].
+ ///
+ /// [`dev_crit`]: crate::dev_crit
+ fn pr_crit(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_CRIT, args) };
+ }
+
+ /// Prints an error-level message (level 3) prefixed with device information.
+ ///
+ /// More details are available from [`dev_err`].
+ ///
+ /// [`dev_err`]: crate::dev_err
+ fn pr_err(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_ERR, args) };
+ }
+
+ /// Prints a warning-level message (level 4) prefixed with device information.
+ ///
+ /// More details are available from [`dev_warn`].
+ ///
+ /// [`dev_warn`]: crate::dev_warn
+ fn pr_warn(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_WARNING, args) };
+ }
+
+ /// Prints a notice-level message (level 5) prefixed with device information.
+ ///
+ /// More details are available from [`dev_notice`].
+ ///
+ /// [`dev_notice`]: crate::dev_notice
+ fn pr_notice(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_NOTICE, args) };
+ }
+
+ /// Prints an info-level message (level 6) prefixed with device information.
+ ///
+ /// More details are available from [`dev_info`].
+ ///
+ /// [`dev_info`]: crate::dev_info
+ fn pr_info(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_INFO, args) };
+ }
+
+ /// Prints a debug-level message (level 7) prefixed with device information.
+ ///
+ /// More details are available from [`dev_dbg`].
+ ///
+ /// [`dev_dbg`]: crate::dev_dbg
+ fn pr_dbg(&self, args: fmt::Arguments<'_>) {
+ if cfg!(debug_assertions) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_DEBUG, args) };
+ }
+ }
+
+ /// Prints the provided message to the console.
+ ///
+ /// # Safety
+ ///
+    /// Callers must ensure that `klevel` is null-terminated; in particular, it must be one
+    /// of the `KERN_*` constants, for example, `KERN_CRIT`, `KERN_ALERT`, etc.
+ #[cfg_attr(not(CONFIG_PRINTK), allow(unused_variables))]
+ unsafe fn printk(&self, klevel: &[u8], msg: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated and one of the kernel constants. `self.raw_device`
+ // is valid because `self` is valid. The "%pA" format string expects a pointer to
+ // `fmt::Arguments`, which is what we're passing as the last argument.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_dev_printk(
+ klevel as *const _ as *const core::ffi::c_char,
+ self.raw_device(),
+ c_str!("%pA").as_char_ptr(),
+ &msg as *const _ as *const core::ffi::c_void,
+ )
+ };
+ }
+}
+
+/// A ref-counted device.
+///
+/// # Invariants
+///
+/// `ptr` is valid, non-null, and has a non-zero reference count. One of the references is owned by
+/// `self`, and will be decremented when `self` is dropped.
+pub struct Device {
+ pub(crate) ptr: *mut bindings::device,
+}
+
+// SAFETY: `Device` only holds a pointer to a C device, which is safe to be used from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` only holds a pointer to a C device, references to which are safe to be used
+// from any thread.
+unsafe impl Sync for Device {}
+
+impl Device {
+ /// Creates a new device instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count.
+ pub unsafe fn new(ptr: *mut bindings::device) -> Self {
+        // SAFETY: By the safety requirements, `ptr` is valid and its refcount will be
+        // incremented.
+ unsafe { bindings::get_device(ptr) };
+ // INVARIANT: The safety requirements satisfy all but one invariant, which is that `self`
+ // owns a reference. This is satisfied by the call to `get_device` above.
+ Self { ptr }
+ }
+
+ /// Creates a new device instance from an existing [`RawDevice`] instance.
+ pub fn from_dev(dev: &dyn RawDevice) -> Self {
+ // SAFETY: The requirements are satisfied by the existence of `RawDevice` and its safety
+ // requirements.
+ unsafe { Self::new(dev.raw_device()) }
+ }
+}
+
+// SAFETY: The device returned by `raw_device` is the one for which we hold a reference.
+unsafe impl RawDevice for Device {
+ fn raw_device(&self) -> *mut bindings::device {
+ self.ptr
+ }
+}
+
+impl Drop for Device {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // relinquish it now.
+ unsafe { bindings::put_device(self.ptr) };
+ }
+}
+
+impl Clone for Device {
+ fn clone(&self) -> Self {
+ Device::from_dev(self)
+ }
+}
+
+/// Device data.
+///
+/// When a device is removed (for whatever reason, for example, because the device was unplugged or
+/// because the user decided to unbind the driver), the driver is given a chance to clean up its
+/// state, and ideally all I/O resources stop being used.
+///
+/// However, the device data is reference-counted because other subsystems hold pointers to it. So
+/// some device state must be freed and no longer used, while other state must remain accessible.
+///
+/// This struct separates the device data into three categories:
+/// 1. Registrations: destroyed when the device is removed, but before the I/O resources
+///    become inaccessible.
+/// 2. I/O resources: available until the device is removed.
+/// 3. General data: remains available as long as the reference count is nonzero.
+///
+/// This struct implements the `DeviceRemoval` trait so that resources are cleaned up even if the
+/// device driver does not do so explicitly.
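+///
+/// A rough sketch of the intended usage; `regs`, `res` and `gen` stand in for driver-defined
+/// registration, resource and general data values:
+///
+/// ```ignore
+/// // Creates the two lock classes and a reference-counted `Data` instance.
+/// let data = kernel::new_device_data!(regs, res, gen, "MyDriver::Data")?;
+///
+/// // I/O resources are only accessible while the device has not been removed.
+/// if let Some(res) = data.resources() {
+///     // Use the I/O resources here.
+/// }
+/// ```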
+pub struct Data<T, U, V> {
+ registrations: RevocableMutex<T>,
+ resources: Revocable<U>,
+ general: V,
+}
+
+/// Safely creates a new reference-counted instance of [`Data`].
+#[doc(hidden)]
+#[macro_export]
+macro_rules! new_device_data {
+ ($reg:expr, $res:expr, $gen:expr, $name:literal) => {{
+ static CLASS1: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ static CLASS2: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ let regs = $reg;
+ let res = $res;
+ let gen = $gen;
+ let name = $crate::c_str!($name);
+ $crate::device::Data::try_new(regs, res, gen, name, &CLASS1, &CLASS2)
+ }};
+}
+
+impl<T, U, V> Data<T, U, V> {
+ /// Creates a new instance of `Data`.
+ ///
+ /// It is recommended that the [`new_device_data`] macro be used as it automatically creates
+ /// the lock classes.
+ pub fn try_new(
+ registrations: T,
+ resources: U,
+ general: V,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ ) -> Result<Pin<UniqueArc<Self>>> {
+ let mut ret = Pin::from(UniqueArc::try_new(Self {
+ // SAFETY: We call `RevocableMutex::init` below.
+ registrations: unsafe { RevocableMutex::new(registrations) },
+ resources: Revocable::new(resources),
+ general,
+ })?);
+
+ // SAFETY: `Data::registrations` is pinned when `Data` is.
+ let pinned = unsafe { ret.as_mut().map_unchecked_mut(|d| &mut d.registrations) };
+ pinned.init(name, key1, key2);
+ Ok(ret)
+ }
+
+ /// Returns the resources if they're still available.
+ pub fn resources(&self) -> Option<RevocableGuard<'_, U>> {
+ self.resources.try_access()
+ }
+
+ /// Returns the locked registrations if they're still available.
+ pub fn registrations(&self) -> Option<RevocableMutexGuard<'_, T>> {
+ self.registrations.try_write()
+ }
+}
+
+impl<T, U, V> crate::driver::DeviceRemoval for Data<T, U, V> {
+ fn device_remove(&self) {
+ // We revoke the registrations first so that resources are still available to them during
+ // unregistration.
+ self.registrations.revoke();
+
+ // Release resources now. General data remains available.
+ self.resources.revoke();
+ }
+}
+
+impl<T, U, V> Deref for Data<T, U, V> {
+ type Target = V;
+
+ fn deref(&self) -> &V {
+ &self.general
+ }
+}
+
+impl<T, U, V> DerefMut for Data<T, U, V> {
+ fn deref_mut(&mut self) -> &mut V {
+ &mut self.general
+ }
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! dev_printk {
+ ($method:ident, $dev:expr, $($f:tt)*) => {
+ {
+            // We have an explicit `use` statement here so that callers of this macro are not
+ // required to explicitly use the `RawDevice` trait to use its functions.
+ use $crate::device::RawDevice;
+ ($dev).$method(core::format_args!($($f)*));
+ }
+ }
+}
+
+/// Prints an emergency-level message (level 0) prefixed with device information.
+///
+/// This level should be used if the system is unusable.
+///
+/// Equivalent to the kernel's `dev_emerg` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_emerg!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_emerg {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_emerg, $($f)*); }
+}
+
+/// Prints an alert-level message (level 1) prefixed with device information.
+///
+/// This level should be used if action must be taken immediately.
+///
+/// Equivalent to the kernel's `dev_alert` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_alert!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_alert {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_alert, $($f)*); }
+}
+
+/// Prints a critical-level message (level 2) prefixed with device information.
+///
+/// This level should be used in critical conditions.
+///
+/// Equivalent to the kernel's `dev_crit` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_crit!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_crit {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_crit, $($f)*); }
+}
+
+/// Prints an error-level message (level 3) prefixed with device information.
+///
+/// This level should be used in error conditions.
+///
+/// Equivalent to the kernel's `dev_err` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_err!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_err {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_err, $($f)*); }
+}
+
+/// Prints a warning-level message (level 4) prefixed with device information.
+///
+/// This level should be used in warning conditions.
+///
+/// Equivalent to the kernel's `dev_warn` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_warn!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_warn {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_warn, $($f)*); }
+}
+
+/// Prints a notice-level message (level 5) prefixed with device information.
+///
+/// This level should be used in normal but significant conditions.
+///
+/// Equivalent to the kernel's `dev_notice` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_notice!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_notice {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_notice, $($f)*); }
+}
+
+/// Prints an info-level message (level 6) prefixed with device information.
+///
+/// This level should be used for informational messages.
+///
+/// Equivalent to the kernel's `dev_info` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_info!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_info {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_info, $($f)*); }
+}
+
+/// Prints a debug-level message (level 7) prefixed with device information.
+///
+/// This level should be used for debug messages.
+///
+/// Equivalent to the kernel's `dev_dbg` macro, except that it doesn't support dynamic debug yet.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_dbg!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_dbg {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_dbg, $($f)*); }
+}
diff --git a/rust/kernel/dma_fence.rs b/rust/kernel/dma_fence.rs
new file mode 100644
index 000000000000..ca93380d9da2
--- /dev/null
+++ b/rust/kernel/dma_fence.rs
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! DMA fence abstraction.
+//!
+//! C header: [`include/linux/dma-fence.h`](../../../../include/linux/dma-fence.h)
+
+use crate::{
+ bindings,
+ error::{to_result, Result},
+ prelude::*,
+ sync::LockClassKey,
+ types::Opaque,
+};
+use core::fmt::Write;
+use core::ops::{Deref, DerefMut};
+use core::ptr::addr_of_mut;
+use core::sync::atomic::{AtomicU64, Ordering};
+
+/// Any kind of DMA Fence Object
+///
+/// # Invariants
+/// raw() returns a valid pointer to a dma_fence and we own a reference to it.
+pub trait RawDmaFence: crate::private::Sealed {
+ /// Returns the raw `struct dma_fence` pointer.
+ fn raw(&self) -> *mut bindings::dma_fence;
+
+ /// Returns the raw `struct dma_fence` pointer and consumes the object.
+ ///
+ /// The caller is responsible for dropping the reference.
+ fn into_raw(self) -> *mut bindings::dma_fence
+ where
+ Self: Sized,
+ {
+ let ptr = self.raw();
+ core::mem::forget(self);
+ ptr
+ }
+
+    /// Advances this fence to the chain node which will signal the given sequence number.
+    /// If `seqno` is zero, this returns `self` again.
+ fn chain_find_seqno(self, seqno: u64) -> Result<Fence>
+ where
+ Self: Sized,
+ {
+ let mut ptr = self.into_raw();
+
+ // SAFETY: This will safely fail if this DmaFence is not a chain.
+ // `ptr` is valid per the type invariant.
+ let ret = unsafe { bindings::dma_fence_chain_find_seqno(&mut ptr, seqno) };
+
+ if ret != 0 {
+ // SAFETY: This is either an owned reference or NULL, dma_fence_put can handle both.
+ unsafe { bindings::dma_fence_put(ptr) };
+ Err(Error::from_kernel_errno(ret))
+ } else if ptr.is_null() {
+ Err(EINVAL) // When can this happen?
+ } else {
+ // SAFETY: ptr is valid and non-NULL as checked above.
+ Ok(unsafe { Fence::from_raw(ptr) })
+ }
+ }
+
+ /// Signal completion of this fence
+ fn signal(&self) -> Result {
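+        // SAFETY: `self.raw()` is a valid fence pointer per the trait invariant.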
+ to_result(unsafe { bindings::dma_fence_signal(self.raw()) })
+ }
+
+ /// Set the error flag on this fence
+ fn set_error(&self, err: Error) {
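+        // SAFETY: `self.raw()` is a valid fence pointer per the trait invariant.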
+ unsafe { bindings::dma_fence_set_error(self.raw(), err.to_kernel_errno()) };
+ }
+}
+
+/// A generic DMA Fence Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a dma_fence and we own a reference to it.
+pub struct Fence {
+ ptr: *mut bindings::dma_fence,
+}
+
+impl Fence {
+ /// Create a new Fence object from a raw pointer to a dma_fence.
+ ///
+ /// # Safety
+ /// The caller must own a reference to the dma_fence, which is transferred to the new object.
+ pub(crate) unsafe fn from_raw(ptr: *mut bindings::dma_fence) -> Fence {
+ Fence { ptr }
+ }
+
+ /// Create a new Fence object from a raw pointer to a dma_fence.
+ ///
+ /// # Safety
+ /// Takes a borrowed reference to the dma_fence, and increments the reference count.
+ pub(crate) unsafe fn get_raw(ptr: *mut bindings::dma_fence) -> Fence {
+ // SAFETY: Pointer is valid per the safety contract
+ unsafe { bindings::dma_fence_get(ptr) };
+ Fence { ptr }
+ }
+
+ /// Create a new Fence object from a RawDmaFence.
+ pub fn from_fence(fence: &dyn RawDmaFence) -> Fence {
+ // SAFETY: Pointer is valid per the RawDmaFence contract
+ unsafe { Self::get_raw(fence.raw()) }
+ }
+}
+
+impl crate::private::Sealed for Fence {}
+
+impl RawDmaFence for Fence {
+ fn raw(&self) -> *mut bindings::dma_fence {
+ self.ptr
+ }
+}
+
+impl Drop for Fence {
+ fn drop(&mut self) {
+        // SAFETY: We own a reference to this fence.
+ unsafe { bindings::dma_fence_put(self.ptr) };
+ }
+}
+
+impl Clone for Fence {
+ fn clone(&self) -> Self {
+ // SAFETY: `ptr` is valid per the type invariant and we own a reference to it.
+ unsafe {
+ bindings::dma_fence_get(self.ptr);
+ Self::from_raw(self.ptr)
+ }
+ }
+}
+
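+// SAFETY: `Fence` only holds a refcounted pointer to a C `dma_fence`, which is safe to
+// reference and release from any thread.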
+unsafe impl Sync for Fence {}
+unsafe impl Send for Fence {}
+
+/// Trait which must be implemented by driver-specific fence objects.
+#[vtable]
+pub trait FenceOps: Sized + Send + Sync {
+ /// True if this dma_fence implementation uses 64bit seqno, false otherwise.
+ const USE_64BIT_SEQNO: bool;
+
+ /// Returns the driver name. This is a callback to allow drivers to compute the name at
+ /// runtime, without having it to store permanently for each fence, or build a cache of
+ /// some sort.
+ fn get_driver_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr;
+
+    /// Return the name of the context this fence belongs to. This is a callback to allow drivers
+    /// to compute the name at runtime, without having to store it permanently for each fence or
+    /// build a cache of some sort.
+ fn get_timeline_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr;
+
+ /// Enable software signaling of fence.
+ fn enable_signaling(self: &FenceObject<Self>) -> bool {
+ false
+ }
+
+ /// Peek whether the fence is signaled, as a fastpath optimization for e.g. dma_fence_wait() or
+ /// dma_fence_add_callback().
+ fn signaled(self: &FenceObject<Self>) -> bool {
+ false
+ }
+
+ /// Callback to fill in free-form debug info specific to this fence, like the sequence number.
+ fn fence_value_str(self: &FenceObject<Self>, _output: &mut dyn Write) {}
+
+ /// Fills in the current value of the timeline as a string, like the sequence number. Note that
+ /// the specific fence passed to this function should not matter, drivers should only use it to
+ /// look up the corresponding timeline structures.
+ fn timeline_value_str(self: &FenceObject<Self>, _output: &mut dyn Write) {}
+}
+
+unsafe extern "C" fn get_driver_name_cb<T: FenceOps>(
+ fence: *mut bindings::dma_fence,
+) -> *const core::ffi::c_char {
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+ T::get_driver_name(unsafe { &mut *p }).as_char_ptr()
+}
+
+unsafe extern "C" fn get_timeline_name_cb<T: FenceOps>(
+ fence: *mut bindings::dma_fence,
+) -> *const core::ffi::c_char {
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+ T::get_timeline_name(unsafe { &mut *p }).as_char_ptr()
+}
+
+unsafe extern "C" fn enable_signaling_cb<T: FenceOps>(fence: *mut bindings::dma_fence) -> bool {
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+ T::enable_signaling(unsafe { &mut *p })
+}
+
+unsafe extern "C" fn signaled_cb<T: FenceOps>(fence: *mut bindings::dma_fence) -> bool {
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+ T::signaled(unsafe { &mut *p })
+}
+
+unsafe extern "C" fn release_cb<T: FenceOps>(fence: *mut bindings::dma_fence) {
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: p is never used after this
+ unsafe {
+ core::ptr::drop_in_place(&mut (*p).inner);
+ }
+
+ // SAFETY: All of our fences are allocated using kmalloc, so this is safe.
+ unsafe { bindings::dma_fence_free(fence) };
+}
+
+unsafe extern "C" fn fence_value_str_cb<T: FenceOps>(
+ fence: *mut bindings::dma_fence,
+ string: *mut core::ffi::c_char,
+ size: core::ffi::c_int,
+) {
+ let size: usize = size.try_into().unwrap_or(0);
+
+ if size == 0 {
+ return;
+ }
+
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: The caller is responsible for the validity of string/size
+ let mut f = unsafe { crate::str::Formatter::from_buffer(string as *mut _, size) };
+
+ // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+ T::fence_value_str(unsafe { &mut *p }, &mut f);
+ let _ = f.write_str("\0");
+
+ // SAFETY: `size` is at least 1 per the check above
+ unsafe { *string.add(size - 1) = 0 };
+}
+
+unsafe extern "C" fn timeline_value_str_cb<T: FenceOps>(
+ fence: *mut bindings::dma_fence,
+ string: *mut core::ffi::c_char,
+ size: core::ffi::c_int,
+) {
+ let size: usize = size.try_into().unwrap_or(0);
+
+ if size == 0 {
+ return;
+ }
+
+ // SAFETY: All of our fences are FenceObject<T>.
+ let p = crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T>;
+
+ // SAFETY: The caller is responsible for the validity of string/size
+ let mut f = unsafe { crate::str::Formatter::from_buffer(string as *mut _, size) };
+
+ // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+ T::timeline_value_str(unsafe { &mut *p }, &mut f);
+ let _ = f.write_str("\0");
+
+ // SAFETY: `size` is at least 1 per the check above
+ unsafe { *string.add(size - 1) = 0 };
+}
+
+// Allow FenceObject<Self> to be used as a self argument, for ergonomics
+impl<T: FenceOps> core::ops::Receiver for FenceObject<T> {}
+
+/// A driver-specific DMA Fence Object
+///
+/// # Invariants
+/// `fence` is a valid, initialised `dma_fence` for as long as this object is alive.
+#[repr(C)]
+pub struct FenceObject<T: FenceOps> {
+ fence: bindings::dma_fence,
+ lock: Opaque<bindings::spinlock>,
+ inner: T,
+}
+
+impl<T: FenceOps> FenceObject<T> {
+ const SIZE: usize = core::mem::size_of::<Self>();
+
+ const VTABLE: bindings::dma_fence_ops = bindings::dma_fence_ops {
+ use_64bit_seqno: T::USE_64BIT_SEQNO,
+ get_driver_name: Some(get_driver_name_cb::<T>),
+ get_timeline_name: Some(get_timeline_name_cb::<T>),
+ enable_signaling: if T::HAS_ENABLE_SIGNALING {
+ Some(enable_signaling_cb::<T>)
+ } else {
+ None
+ },
+ signaled: if T::HAS_SIGNALED {
+ Some(signaled_cb::<T>)
+ } else {
+ None
+ },
+ wait: None, // Deprecated
+ release: Some(release_cb::<T>),
+ fence_value_str: if T::HAS_FENCE_VALUE_STR {
+ Some(fence_value_str_cb::<T>)
+ } else {
+ None
+ },
+ timeline_value_str: if T::HAS_TIMELINE_VALUE_STR {
+ Some(timeline_value_str_cb::<T>)
+ } else {
+ None
+ },
+ };
+}
+
+impl<T: FenceOps> Deref for FenceObject<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.inner
+ }
+}
+
+impl<T: FenceOps> DerefMut for FenceObject<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
+}
+
+impl<T: FenceOps> crate::private::Sealed for FenceObject<T> {}
+impl<T: FenceOps> RawDmaFence for FenceObject<T> {
+ fn raw(&self) -> *mut bindings::dma_fence {
+ &self.fence as *const _ as *mut _
+ }
+}
+
+/// A unique reference to a driver-specific fence object
+pub struct UniqueFence<T: FenceOps>(*mut FenceObject<T>);
+
+impl<T: FenceOps> Deref for UniqueFence<T> {
+ type Target = FenceObject<T>;
+
+ fn deref(&self) -> &FenceObject<T> {
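+        // SAFETY: `self.0` points to a valid `FenceObject` for as long as `self` owns
+        // its reference.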
+ unsafe { &*self.0 }
+ }
+}
+
+impl<T: FenceOps> DerefMut for UniqueFence<T> {
+ fn deref_mut(&mut self) -> &mut FenceObject<T> {
+ unsafe { &mut *self.0 }
+ }
+}
+
+impl<T: FenceOps> crate::private::Sealed for UniqueFence<T> {}
+impl<T: FenceOps> RawDmaFence for UniqueFence<T> {
+ fn raw(&self) -> *mut bindings::dma_fence {
+ unsafe { addr_of_mut!((*self.0).fence) }
+ }
+}
+
+impl<T: FenceOps> From<UniqueFence<T>> for UserFence<T> {
+ fn from(value: UniqueFence<T>) -> Self {
+ let ptr = value.0;
+ core::mem::forget(value);
+
+ UserFence(ptr)
+ }
+}
+
+impl<T: FenceOps> Drop for UniqueFence<T> {
+ fn drop(&mut self) {
+ // SAFETY: We own a reference to this fence.
+ unsafe { bindings::dma_fence_put(self.raw()) };
+ }
+}
+
+unsafe impl<T: FenceOps> Sync for UniqueFence<T> {}
+unsafe impl<T: FenceOps> Send for UniqueFence<T> {}
+
+/// A shared reference to a driver-specific fence object
+pub struct UserFence<T: FenceOps>(*mut FenceObject<T>);
+
+impl<T: FenceOps> Deref for UserFence<T> {
+ type Target = FenceObject<T>;
+
+ fn deref(&self) -> &FenceObject<T> {
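+        // SAFETY: `self.0` points to a valid `FenceObject` for as long as `self` owns
+        // its reference.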
+ unsafe { &*self.0 }
+ }
+}
+
+impl<T: FenceOps> Clone for UserFence<T> {
+ fn clone(&self) -> Self {
+        // SAFETY: `self.0` is valid per the type invariant and we own a reference to it.
+ unsafe {
+ bindings::dma_fence_get(self.raw());
+ Self(self.0)
+ }
+ }
+}
+
+impl<T: FenceOps> crate::private::Sealed for UserFence<T> {}
+impl<T: FenceOps> RawDmaFence for UserFence<T> {
+ fn raw(&self) -> *mut bindings::dma_fence {
+ unsafe { addr_of_mut!((*self.0).fence) }
+ }
+}
+
+impl<T: FenceOps> Drop for UserFence<T> {
+ fn drop(&mut self) {
+ // SAFETY: We own a reference to this fence.
+ unsafe { bindings::dma_fence_put(self.raw()) };
+ }
+}
+
+unsafe impl<T: FenceOps> Sync for UserFence<T> {}
+unsafe impl<T: FenceOps> Send for UserFence<T> {}
+
+/// An array of fence contexts, out of which fences can be created.
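+///
+/// A minimal sketch of the intended usage; `MyFence` stands in for a driver type implementing
+/// [`FenceOps`]:
+///
+/// ```ignore
+/// static FENCE_CLASS: kernel::sync::LockClassKey = kernel::sync::LockClassKey::new();
+///
+/// // Allocate two timeline contexts, then create and signal a fence in context 0.
+/// let ctxs = FenceContexts::new(2, c_str!("my-driver-fences"), &FENCE_CLASS)?;
+/// let fence = ctxs.new_fence(0, MyFence::new())?;
+/// fence.signal()?;
+/// ```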
+pub struct FenceContexts {
+ start: u64,
+ count: u32,
+ seqnos: Vec<AtomicU64>,
+ lock_name: &'static CStr,
+ lock_key: &'static LockClassKey,
+}
+
+impl FenceContexts {
+ /// Create a new set of fence contexts.
+ pub fn new(
+ count: u32,
+ name: &'static CStr,
+ key: &'static LockClassKey,
+ ) -> Result<FenceContexts> {
+ let mut seqnos: Vec<AtomicU64> = Vec::new();
+
+ seqnos.try_reserve(count as usize)?;
+
+ for _ in 0..count {
+ seqnos.try_push(Default::default())?;
+ }
+
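+        // SAFETY: `dma_fence_context_alloc` is safe to call with any count; it only
+        // hands out fresh context numbers.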
+ let start = unsafe { bindings::dma_fence_context_alloc(count as core::ffi::c_uint) };
+
+ Ok(FenceContexts {
+ start,
+ count,
+ seqnos,
+ lock_name: name,
+ lock_key: key,
+ })
+ }
+
+ /// Create a new fence in a given context index.
+ pub fn new_fence<T: FenceOps>(&self, context: u32, inner: T) -> Result<UniqueFence<T>> {
+        if context >= self.count {
+ return Err(EINVAL);
+ }
+
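+        // SAFETY: `krealloc` on a NULL pointer is equivalent to `kmalloc`, which is
+        // safe to call with any size and these GFP flags.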
+ let p = unsafe {
+ bindings::krealloc(
+ core::ptr::null_mut(),
+ FenceObject::<T>::SIZE,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ ) as *mut FenceObject<T>
+ };
+
+ if p.is_null() {
+ return Err(ENOMEM);
+ }
+
+ let seqno = self.seqnos[context as usize].fetch_add(1, Ordering::Relaxed);
+
+ // SAFETY: The pointer is valid, so pointers to members are too.
+ // After this, all fields are initialized.
+ unsafe {
+ addr_of_mut!((*p).inner).write(inner);
+ bindings::__spin_lock_init(
+ addr_of_mut!((*p).lock) as *mut _,
+ self.lock_name.as_char_ptr(),
+ self.lock_key.get(),
+ );
+ bindings::dma_fence_init(
+ addr_of_mut!((*p).fence),
+ &FenceObject::<T>::VTABLE,
+ addr_of_mut!((*p).lock) as *mut _,
+ self.start + context as u64,
+ seqno,
+ );
+ };
+
+ Ok(UniqueFence(p))
+ }
+}
+
+/// A DMA Fence Chain Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a dma_fence_chain which we own.
+pub struct FenceChain {
+ ptr: *mut bindings::dma_fence_chain,
+}
+
+impl FenceChain {
+ /// Create a new DmaFenceChain object.
+ pub fn new() -> Result<Self> {
+ // SAFETY: This function is safe to call and takes no arguments.
+ let ptr = unsafe { bindings::dma_fence_chain_alloc() };
+
+ if ptr.is_null() {
+ Err(ENOMEM)
+ } else {
+ Ok(FenceChain { ptr })
+ }
+ }
+
+ /// Convert the DmaFenceChain into the underlying raw pointer.
+ ///
+ /// This assumes the caller will take ownership of the object.
+ pub(crate) fn into_raw(self) -> *mut bindings::dma_fence_chain {
+ let ptr = self.ptr;
+ core::mem::forget(self);
+ ptr
+ }
+}
+
+impl Drop for FenceChain {
+ fn drop(&mut self) {
+ // SAFETY: We own this dma_fence_chain.
+ unsafe { bindings::dma_fence_chain_free(self.ptr) };
+ }
+}
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
new file mode 100644
index 000000000000..aa1441ae809b
--- /dev/null
+++ b/rust/kernel/driver.rs
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.).
+//!
+//! Each bus/subsystem is expected to implement [`DriverOps`], which allows drivers to register
+//! using the [`Registration`] class.
+
+use crate::{error::code::*, error::Result, str::CStr, sync::Arc, ThisModule};
+use alloc::boxed::Box;
+use core::{cell::UnsafeCell, marker::PhantomData, ops::Deref, pin::Pin};
+
+/// A subsystem (e.g., PCI, Platform, Amba, etc.) that allows drivers to be written for it.
+pub trait DriverOps {
+ /// The type that holds information about the registration. This is typically a struct defined
+ /// by the C portion of the kernel.
+ type RegType: Default;
+
+ /// Registers a driver.
+ ///
+ /// # Safety
+ ///
+ /// `reg` must point to valid, initialised, and writable memory. It may be modified by this
+ /// function to hold registration state.
+ ///
+ /// On success, `reg` must remain pinned and valid until the matching call to
+ /// [`DriverOps::unregister`].
+ unsafe fn register(
+ reg: *mut Self::RegType,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result;
+
+ /// Unregisters a driver previously registered with [`DriverOps::register`].
+ ///
+ /// # Safety
+ ///
+ /// `reg` must point to valid writable memory, initialised by a previous successful call to
+ /// [`DriverOps::register`].
+ unsafe fn unregister(reg: *mut Self::RegType);
+}
+
+/// The registration of a driver.
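+///
+/// A sketch of typical usage; `MyDriverOps` stands in for a bus-specific [`DriverOps`]
+/// implementation and `module` for the usual `&'static ThisModule` handle:
+///
+/// ```ignore
+/// // Registers on success; unregisters when the returned object is dropped.
+/// let _registration =
+///     Registration::<MyDriverOps>::new_pinned(c_str!("my_driver"), module)?;
+/// ```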
+pub struct Registration<T: DriverOps> {
+ is_registered: bool,
+ concrete_reg: UnsafeCell<T::RegType>,
+}
+
+// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe to
+// share references to it with multiple threads as nothing can be done through such references.
+unsafe impl<T: DriverOps> Sync for Registration<T> {}
+
+impl<T: DriverOps> Registration<T> {
+ /// Creates a new instance of the registration object.
+ pub fn new() -> Self {
+ Self {
+ is_registered: false,
+ concrete_reg: UnsafeCell::new(T::RegType::default()),
+ }
+ }
+
+ /// Allocates a pinned registration object and registers it.
+ ///
+ /// Returns a pinned heap-allocated representation of the registration.
+ pub fn new_pinned(name: &'static CStr, module: &'static ThisModule) -> Result<Pin<Box<Self>>> {
+ let mut reg = Pin::from(Box::try_new(Self::new())?);
+ reg.as_mut().register(name, module)?;
+ Ok(reg)
+ }
+
+ /// Registers a driver with its subsystem.
+ ///
+ /// It must be pinned because the memory block that represents the registration is potentially
+ /// self-referential.
+ pub fn register(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.is_registered {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: `concrete_reg` was initialised via its default constructor. It is only freed
+ // after `Self::drop` is called, which first calls `T::unregister`.
+ unsafe { T::register(this.concrete_reg.get(), name, module) }?;
+
+ this.is_registered = true;
+ Ok(())
+ }
+}
+
+impl<T: DriverOps> Default for Registration<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T: DriverOps> Drop for Registration<T> {
+ fn drop(&mut self) {
+ if self.is_registered {
+ // SAFETY: This path only runs if a previous call to `T::register` completed
+ // successfully.
+ unsafe { T::unregister(self.concrete_reg.get()) };
+ }
+ }
+}
+
+/// Conversion from a device id to a raw device id.
+///
+/// This is meant to be implemented by buses/subsystems so that they can use [`IdTable`] to
+/// guarantee (at compile-time) zero-termination of device id tables provided by drivers.
+///
+/// # Safety
+///
+/// Implementers must ensure that:
+/// - [`RawDeviceId::ZERO`] is actually a zeroed-out version of the raw device id.
+/// - [`RawDeviceId::to_rawid`] stores `offset` in the context/data field of the raw device id so
+/// that buses can recover the pointer to the data.
+#[const_trait]
+pub unsafe trait RawDeviceId {
+ /// The raw type that holds the device id.
+ ///
+    /// Id tables created from [`Self`] hold this type in their zero-terminated array.
+ type RawType: Copy;
+
+ /// A zeroed-out representation of the raw device id.
+ ///
+ /// Id tables created from [`Self`] use [`Self::ZERO`] as the sentinel to indicate the end of
+ /// the table.
+ const ZERO: Self::RawType;
+
+ /// Converts an id into a raw id.
+ ///
+ /// `offset` is the offset from the memory location where the raw device id is stored to the
+ /// location where its associated context information is stored. Implementations must store
+ /// this in the appropriate context/data field of the raw type.
+ fn to_rawid(&self, offset: isize) -> Self::RawType;
+}
+
+/// A zero-terminated device id array.
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct IdArrayIds<T: RawDeviceId, const N: usize> {
+ ids: [T::RawType; N],
+ sentinel: T::RawType,
+}
+
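+// SAFETY: `IdArrayIds` only holds plain `Copy` device id data that is never mutated after
+// construction, so it is safe to share references to it across threads.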
+unsafe impl<T: RawDeviceId, const N: usize> Sync for IdArrayIds<T, N> {}
+
+/// A zero-terminated device id array, followed by context data.
+#[repr(C)]
+pub struct IdArray<T: RawDeviceId, U, const N: usize> {
+ ids: IdArrayIds<T, N>,
+ id_infos: [Option<U>; N],
+}
+
+impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
+ /// Creates a new instance of the array.
+ ///
+ /// The contents are derived from the given identifiers and context information.
+ pub const fn new(ids: [T; N], infos: [Option<U>; N]) -> Self
+ where
+ T: ~const RawDeviceId + Copy,
+ T::RawType: Copy + Clone,
+ {
+ let mut array = Self {
+ ids: IdArrayIds {
+ ids: [T::ZERO; N],
+ sentinel: T::ZERO,
+ },
+ id_infos: infos,
+ };
+ let mut i = 0usize;
+ while i < N {
+ // SAFETY: Both pointers are within `array` (or one byte beyond), consequently they are
+            // derived from the same allocated object. We are using a `u8` pointer, whose size is 1,
+ // so the pointers are necessarily 1-byte aligned.
+ let offset = unsafe {
+ (&array.id_infos[i] as *const _ as *const u8)
+ .offset_from(&array.ids.ids[i] as *const _ as _)
+ };
+ array.ids.ids[i] = ids[i].to_rawid(offset);
+ i += 1;
+ }
+ array
+ }
+
+ /// Returns an `IdTable` backed by `self`.
+ ///
+ /// This is used to essentially erase the array size.
+ pub const fn as_table(&self) -> IdTable<'_, T, U> {
+ IdTable {
+ first: &self.ids.ids[0],
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns the number of items in the ID table.
+ pub const fn count(&self) -> usize {
+ self.ids.ids.len()
+ }
+
+ /// Returns the inner IdArrayIds array, without the context data.
+ pub const fn as_ids(&self) -> IdArrayIds<T, N>
+ where
+ T: ~const RawDeviceId + Copy,
+ {
+ self.ids
+ }
+}
+
+/// A device id table.
+///
+/// The table is guaranteed to be zero-terminated and to be followed by an array of context data of
+/// type `Option<U>`.
+pub struct IdTable<'a, T: RawDeviceId, U> {
+ first: &'a T::RawType,
+ _p: PhantomData<&'a U>,
+}
+
+impl<T: RawDeviceId, U> const AsRef<T::RawType> for IdTable<'_, T, U> {
+ fn as_ref(&self) -> &T::RawType {
+ self.first
+ }
+}
+
+/// Counts the number of parenthesis-delimited, comma-separated items.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::count_paren_items;
+///
+/// assert_eq!(0, count_paren_items!());
+/// assert_eq!(1, count_paren_items!((A)));
+/// assert_eq!(1, count_paren_items!((A),));
+/// assert_eq!(2, count_paren_items!((A), (B)));
+/// assert_eq!(2, count_paren_items!((A), (B),));
+/// assert_eq!(3, count_paren_items!((A), (B), (C)));
+/// assert_eq!(3, count_paren_items!((A), (B), (C),));
+/// ```
+#[macro_export]
+macro_rules! count_paren_items {
+ (($($item:tt)*), $($remaining:tt)*) => { 1 + $crate::count_paren_items!($($remaining)*) };
+ (($($item:tt)*)) => { 1 };
+ () => { 0 };
+}
+
+/// Converts a comma-separated list of pairs into an array of the first elements. That is, it
+/// discards the second element of each pair.
+///
+/// Additionally, it automatically introduces a type if the first element is wrapped in curly
+/// braces, for example, if it's `{v: 10}`, it becomes `X { v: 10 }`; this is to avoid repeating
+/// the type.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::first_item;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct X {
+/// v: u32,
+/// }
+///
+/// assert_eq!([] as [X; 0], first_item!(X, ));
+/// assert_eq!([X { v: 10 }], first_item!(X, ({ v: 10 }, Y)));
+/// assert_eq!([X { v: 10 }], first_item!(X, ({ v: 10 }, Y),));
+/// assert_eq!([X { v: 10 }], first_item!(X, (X { v: 10 }, Y)));
+/// assert_eq!([X { v: 10 }], first_item!(X, (X { v: 10 }, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y), ({v: 30}, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y), ({v: 30}, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y), (X {v: 30}, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y), (X {v: 30}, Y),));
+/// ```
+#[macro_export]
+macro_rules! first_item {
+ ($id_type:ty, $(({$($first:tt)*}, $second:expr)),* $(,)?) => {
+ {
+ type IdType = $id_type;
+ [$(IdType{$($first)*},)*]
+ }
+ };
+ ($id_type:ty, $(($first:expr, $second:expr)),* $(,)?) => { [$($first,)*] };
+}
+
+/// Converts a comma-separated list of pairs into an array of the second elements. That is, it
+/// discards the first element of each pair.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::second_item;
+///
+/// assert_eq!([] as [u32; 0], second_item!());
+/// assert_eq!([10u32], second_item!((X, 10u32)));
+/// assert_eq!([10u32], second_item!((X, 10u32),));
+/// assert_eq!([10u32], second_item!(({ X }, 10u32)));
+/// assert_eq!([10u32], second_item!(({ X }, 10u32),));
+/// assert_eq!([10u32, 20], second_item!((X, 10u32), (X, 20)));
+/// assert_eq!([10u32, 20], second_item!((X, 10u32), (X, 20),));
+/// assert_eq!([10u32, 20], second_item!(({ X }, 10u32), ({ X }, 20)));
+/// assert_eq!([10u32, 20], second_item!(({ X }, 10u32), ({ X }, 20),));
+/// assert_eq!([10u32, 20, 30], second_item!((X, 10u32), (X, 20), (X, 30)));
+/// assert_eq!([10u32, 20, 30], second_item!((X, 10u32), (X, 20), (X, 30),));
+/// assert_eq!([10u32, 20, 30], second_item!(({ X }, 10u32), ({ X }, 20), ({ X }, 30)));
+/// assert_eq!([10u32, 20, 30], second_item!(({ X }, 10u32), ({ X }, 20), ({ X }, 30),));
+/// ```
+#[macro_export]
+macro_rules! second_item {
+ ($(({$($first:tt)*}, $second:expr)),* $(,)?) => { [$($second,)*] };
+ ($(($first:expr, $second:expr)),* $(,)?) => { [$($second,)*] };
+}
+
+/// Defines a new constant [`IdArray`] with a concise syntax.
+///
+/// It is meant to be used by buses and subsystems to create a similar macro with their device id
+/// type already specified, i.e., with fewer parameters to the end user.
+///
+/// # Examples
+///
+// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
+/// ```ignore
+/// #![feature(const_trait_impl)]
+/// # use kernel::{define_id_array, driver::RawDeviceId};
+///
+/// #[derive(Copy, Clone)]
+/// struct Id(u32);
+///
+/// // SAFETY: `ZERO` is all zeroes and `to_rawid` stores `offset` as the second element of the raw
+/// // device id pair.
+/// unsafe impl const RawDeviceId for Id {
+/// type RawType = (u64, isize);
+/// const ZERO: Self::RawType = (0, 0);
+/// fn to_rawid(&self, offset: isize) -> Self::RawType {
+/// (self.0 as u64 + 1, offset)
+/// }
+/// }
+///
+/// define_id_array!(A1, Id, (), []);
+/// define_id_array!(A2, Id, &'static [u8], [(Id(10), None)]);
+/// define_id_array!(A3, Id, &'static [u8], [(Id(10), Some(b"id1")), ]);
+/// define_id_array!(A4, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2"))]);
+/// define_id_array!(A5, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2")), ]);
+/// define_id_array!(A6, Id, &'static [u8], [(Id(10), None), (Id(20), Some(b"id2")), ]);
+/// define_id_array!(A7, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), None), ]);
+/// define_id_array!(A8, Id, &'static [u8], [(Id(10), None), (Id(20), None), ]);
+///
+/// // Within a bus driver:
+/// driver_id_table!(BUS_ID_TABLE, Id, &'static [u8], A1);
+/// // At the top level:
+/// module_id_table!(MODULE_ID_TABLE, "mybus", Id, A1);
+/// ```
+#[macro_export]
+macro_rules! define_id_array {
+ ($table_name:ident, $id_type:ty, $data_type:ty, [ $($t:tt)* ]) => {
+ const $table_name:
+ $crate::driver::IdArray<$id_type, $data_type, { $crate::count_paren_items!($($t)*) }> =
+ $crate::driver::IdArray::new(
+ $crate::first_item!($id_type, $($t)*), $crate::second_item!($($t)*));
+ };
+}
+
+/// Declares an [`IdArray`] as an [`IdTable`] for a bus driver with a concise syntax.
+///
+/// It is meant to be used by buses and subsystems to create a similar macro with their device id
+/// type already specified, i.e., with fewer parameters to the end user.
+///
+/// # Examples
+///
+// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
+/// ```ignore
+/// #![feature(const_trait_impl)]
+/// # use kernel::{driver_id_table};
+///
+/// driver_id_table!(BUS_ID_TABLE, Id, &'static [u8], MY_ID_ARRAY);
+/// ```
+#[macro_export]
+macro_rules! driver_id_table {
+ ($table_name:ident, $id_type:ty, $data_type:ty, $target:expr) => {
+ const $table_name: Option<$crate::driver::IdTable<'static, $id_type, $data_type>> =
+ Some($target.as_table());
+ };
+}
+
+/// Declares an [`IdArray`] as a module-level ID table with a concise syntax.
+///
+/// It is meant to be used by buses and subsystems to create a similar macro with their device id
+/// type already specified, i.e., with fewer parameters to the end user.
+///
+/// # Examples
+///
+// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
+/// ```ignore
+/// #![feature(const_trait_impl)]
+/// # use kernel::{module_id_table};
+///
+/// module_id_table!(MODULE_ID_TABLE, "mybus", Id, MY_ID_ARRAY);
+/// ```
+#[macro_export]
+macro_rules! module_id_table {
+ ($item_name:ident, $table_type:literal, $id_type:ty, $table_name:ident) => {
+ #[export_name = concat!("__mod_", $table_type, "__", stringify!($table_name), "_device_table")]
+ static $item_name: $crate::driver::IdArrayIds<$id_type, { $table_name.count() }> =
+ $table_name.as_ids();
+ };
+}
+
+/// Custom code within device removal.
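+///
+/// # Examples
+///
+/// A minimal sketch for a hypothetical driver data type (`MyData` is a placeholder):
+///
+/// ```ignore
+/// struct MyData {
+///     // Resources that must be released as soon as the device is gone.
+/// }
+///
+/// impl DeviceRemoval for MyData {
+///     fn device_remove(&self) {
+///         // Release hardware resources here; the struct itself is dropped later,
+///         // once all outstanding references are gone.
+///     }
+/// }
+/// ```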
+pub trait DeviceRemoval {
+ /// Cleans up resources when the device is removed.
+ ///
+ /// This is called when a device is removed and offers implementers the chance to run some code
+ /// that cleans up state.
+ fn device_remove(&self);
+}
+
+impl DeviceRemoval for () {
+ fn device_remove(&self) {}
+}
+
+impl<T: DeviceRemoval> DeviceRemoval for Arc<T> {
+ fn device_remove(&self) {
+ self.deref().device_remove();
+ }
+}
+
+impl<T: DeviceRemoval> DeviceRemoval for Box<T> {
+ fn device_remove(&self) {
+ self.deref().device_remove();
+ }
+}
+
+/// A kernel module that only registers the given driver on init.
+///
+/// This is a helper struct to make it easier to define single-functionality modules, in this case,
+/// modules that offer a single driver.
+pub struct Module<T: DriverOps> {
+ _driver: Pin<Box<Registration<T>>>,
+}
+
+impl<T: DriverOps> crate::Module for Module<T> {
+ fn init(name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
+ Ok(Self {
+ _driver: Registration::new_pinned(name, module)?,
+ })
+ }
+}
+
+/// Declares a kernel module that exposes a single driver.
+///
+/// It is meant to be used as a helper by other subsystems so they can more easily expose their own
+/// macros.
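+///
+/// # Examples
+///
+/// A sketch of how a hypothetical bus (`mybus`, with an `Adapter` driver-ops type) could wrap
+/// this macro for its drivers; all names and module fields below are placeholders:
+///
+/// ```ignore
+/// #[macro_export]
+/// macro_rules! module_mybus_driver {
+///     ($($f:tt)*) => {
+///         $crate::module_driver!(<T>, $crate::mybus::Adapter<T>, { $($f)* });
+///     };
+/// }
+///
+/// // A driver then declares itself with:
+/// module_mybus_driver! {
+///     type: MyDriver,
+///     name: "my_driver",
+///     license: "GPL",
+/// }
+/// ```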
+#[macro_export]
+macro_rules! module_driver {
+ (<$gen_type:ident>, $driver_ops:ty, { type: $type:ty, $($f:tt)* }) => {
+ type Ops<$gen_type> = $driver_ops;
+ type ModuleType = $crate::driver::Module<Ops<$type>>;
+ $crate::prelude::module! {
+ type: ModuleType,
+ $($f)*
+ }
+ }
+}
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
new file mode 100644
index 000000000000..6007f941137a
--- /dev/null
+++ b/rust/kernel/drm/device.rs
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM device.
+//!
+//! C header: [`include/drm/drm_device.h`](../../../../include/drm/drm_device.h)
+
+use crate::{bindings, device, drm, types::ForeignOwnable};
+use core::marker::PhantomData;
+
+/// Represents a reference to a DRM device. The device is reference-counted and is guaranteed
+/// not to be dropped while this object is alive.
+pub struct Device<T: drm::drv::Driver> {
+ // Type invariant: ptr must be a valid and initialized drm_device,
+ // and this value must either own a reference to it or the caller
+ // must ensure that it is never dropped if the reference is borrowed.
+ pub(super) ptr: *mut bindings::drm_device,
+ _p: PhantomData<T>,
+}
+
+impl<T: drm::drv::Driver> Device<T> {
+ // Not intended to be called externally, except via declare_drm_ioctls!()
+ #[doc(hidden)]
+ pub unsafe fn from_raw(raw: *mut bindings::drm_device) -> Device<T> {
+ Device {
+ ptr: raw,
+ _p: PhantomData,
+ }
+ }
+
+ #[allow(dead_code)]
+ pub(crate) fn raw(&self) -> *const bindings::drm_device {
+ self.ptr
+ }
+
+ pub(crate) fn raw_mut(&mut self) -> *mut bindings::drm_device {
+ self.ptr
+ }
+
+ /// Returns a borrowed reference to the user data associated with this Device.
+ pub fn data(&self) -> <T::Data as ForeignOwnable>::Borrowed<'_> {
+ unsafe { T::Data::borrow((*self.ptr).dev_private) }
+ }
+}
+
+impl<T: drm::drv::Driver> Drop for Device<T> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // relinquish it now.
+ unsafe { bindings::drm_dev_put(self.ptr) };
+ }
+}
+
+impl<T: drm::drv::Driver> Clone for Device<T> {
+ fn clone(&self) -> Self {
+ // SAFETY: We get a new reference and then create a new owning object from the raw pointer
+ unsafe {
+ bindings::drm_dev_get(self.ptr);
+ Device::from_raw(self.ptr)
+ }
+ }
+}
+
+// SAFETY: `Device` only holds a pointer to a C device, which is safe to be used from any thread.
+unsafe impl<T: drm::drv::Driver> Send for Device<T> {}
+
+// SAFETY: `Device` only holds a pointer to a C device, references to which are safe to be used
+// from any thread.
+unsafe impl<T: drm::drv::Driver> Sync for Device<T> {}
+
+// Make drm::Device work for dev_info!() and friends
+unsafe impl<T: drm::drv::Driver> device::RawDevice for Device<T> {
+ fn raw_device(&self) -> *mut bindings::device {
+ // SAFETY: ptr must be valid per the type invariant
+ unsafe { (*self.ptr).dev }
+ }
+}
diff --git a/rust/kernel/drm/drv.rs b/rust/kernel/drm/drv.rs
new file mode 100644
index 000000000000..c138352cb489
--- /dev/null
+++ b/rust/kernel/drm/drv.rs
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM driver core.
+//!
+//! C header: [`include/drm/drm_drv.h`](../../../../include/drm/drm_drv.h)
+
+use crate::{
+ bindings, device, drm,
+ error::code::*,
+ error::from_kernel_err_ptr,
+ error::{Error, Result},
+ prelude::*,
+ private::Sealed,
+ str::CStr,
+ types::ForeignOwnable,
+ ThisModule,
+};
+use core::{
+ marker::{PhantomData, PhantomPinned},
+ pin::Pin,
+};
+use macros::vtable;
+
+/// Driver uses the GEM memory manager. This should be set for all modern drivers.
+pub const FEAT_GEM: u32 = bindings::drm_driver_feature_DRIVER_GEM;
+/// Driver supports mode setting interfaces (KMS).
+pub const FEAT_MODESET: u32 = bindings::drm_driver_feature_DRIVER_MODESET;
+/// Driver supports dedicated render nodes.
+pub const FEAT_RENDER: u32 = bindings::drm_driver_feature_DRIVER_RENDER;
+/// Driver supports the full atomic modesetting userspace API.
+///
+/// Drivers which only use atomic internally, but do not support the full userspace API (e.g. not
+/// all properties converted to atomic, or multi-plane updates are not guaranteed to be tear-free)
+/// should not set this flag.
+pub const FEAT_ATOMIC: u32 = bindings::drm_driver_feature_DRIVER_ATOMIC;
+/// Driver supports DRM sync objects for explicit synchronization of command submission.
+pub const FEAT_SYNCOBJ: u32 = bindings::drm_driver_feature_DRIVER_SYNCOBJ;
+/// Driver supports the timeline flavor of DRM sync objects for explicit synchronization of command
+/// submission.
+pub const FEAT_SYNCOBJ_TIMELINE: u32 = bindings::drm_driver_feature_DRIVER_SYNCOBJ_TIMELINE;
+
+/// Information data for a DRM Driver.
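+///
+/// # Examples
+///
+/// Illustrative metadata for a hypothetical driver (all values are placeholders):
+///
+/// ```ignore
+/// const MY_INFO: drv::DriverInfo = drv::DriverInfo {
+///     major: 0,
+///     minor: 1,
+///     patchlevel: 0,
+///     name: c_str!("mydrm"),
+///     desc: c_str!("An illustrative DRM driver"),
+///     date: c_str!("20230224"),
+/// };
+/// ```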
+pub struct DriverInfo {
+ /// Driver major version.
+ pub major: i32,
+ /// Driver minor version.
+ pub minor: i32,
+ /// Driver patchlevel version.
+ pub patchlevel: i32,
+ /// Driver name.
+ pub name: &'static CStr,
+ /// Driver description.
+ pub desc: &'static CStr,
+ /// Driver date.
+ pub date: &'static CStr,
+}
+
+/// Internal memory management operation set, normally created by memory managers (e.g. GEM).
+///
+/// See `kernel::drm::gem` and `kernel::drm::gem::shmem`.
+pub struct AllocOps {
+ pub(crate) gem_create_object: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ size: usize,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) prime_handle_to_fd: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ file_priv: *mut bindings::drm_file,
+ handle: u32,
+ flags: u32,
+ prime_fd: *mut core::ffi::c_int,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) prime_fd_to_handle: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ file_priv: *mut bindings::drm_file,
+ prime_fd: core::ffi::c_int,
+ handle: *mut u32,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) gem_prime_import: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ dma_buf: *mut bindings::dma_buf,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) gem_prime_import_sg_table: Option<
+ unsafe extern "C" fn(
+ dev: *mut bindings::drm_device,
+ attach: *mut bindings::dma_buf_attachment,
+ sgt: *mut bindings::sg_table,
+ ) -> *mut bindings::drm_gem_object,
+ >,
+ pub(crate) gem_prime_mmap: Option<
+ unsafe extern "C" fn(
+ obj: *mut bindings::drm_gem_object,
+ vma: *mut bindings::vm_area_struct,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_create: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ args: *mut bindings::drm_mode_create_dumb,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_map_offset: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ handle: u32,
+ offset: *mut u64,
+ ) -> core::ffi::c_int,
+ >,
+ pub(crate) dumb_destroy: Option<
+ unsafe extern "C" fn(
+ file_priv: *mut bindings::drm_file,
+ dev: *mut bindings::drm_device,
+ handle: u32,
+ ) -> core::ffi::c_int,
+ >,
+}
+
+/// Trait for memory manager implementations. Implemented internally.
+pub trait AllocImpl: Sealed + drm::gem::IntoGEMObject {
+ /// The C callback operations for this memory manager.
+ const ALLOC_OPS: AllocOps;
+}
+
+/// A DRM driver implementation.
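+///
+/// # Examples
+///
+/// A skeleton implementation for a hypothetical driver; every `My*`/`my_*` name and the ioctl
+/// entry are placeholders:
+///
+/// ```ignore
+/// struct MyDriver;
+///
+/// #[vtable]
+/// impl drv::Driver for MyDriver {
+///     type Data = Arc<MyDeviceData>;
+///     type Object = drm::gem::Object<MyGemObject>;
+///     type File = MyFile;
+///
+///     const INFO: drv::DriverInfo = MY_INFO;
+///     const FEATURES: u32 = drv::FEAT_GEM | drv::FEAT_RENDER;
+///
+///     kernel::declare_drm_ioctls! {
+///         (MYDRV_GET_PARAM, drm_mydrv_get_param, ioctl::RENDER_ALLOW, my_get_param),
+///     }
+/// }
+/// ```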
+#[vtable]
+pub trait Driver {
+ /// Context data associated with the DRM driver
+ ///
+ /// Determines the type of the context data passed to each of the methods of the trait.
+ type Data: ForeignOwnable + Sync + Send;
+
+ /// The type used to manage memory for this driver.
+ ///
+ /// Should be either `drm::gem::Object<T>` or `drm::gem::shmem::Object<T>`.
+ type Object: AllocImpl;
+
+ /// The type used to represent a DRM File (client)
+ type File: drm::file::DriverFile;
+
+ /// Driver metadata
+ const INFO: DriverInfo;
+
+ /// Feature flags
+ const FEATURES: u32;
+
+ /// IOCTL list. See `kernel::drm::ioctl::declare_drm_ioctls!{}`.
+ const IOCTLS: &'static [drm::ioctl::DrmIoctlDescriptor];
+}
+
+/// A registration of a DRM device.
+///
+/// # Invariants
+///
+/// `drm` is always a valid pointer to an allocated `drm_device`.
+pub struct Registration<T: Driver> {
+ drm: drm::device::Device<T>,
+ registered: bool,
+ fops: bindings::file_operations,
+ vtable: Pin<Box<bindings::drm_driver>>,
+ _p: PhantomData<T>,
+ _pin: PhantomPinned,
+}
+
+#[cfg(CONFIG_DRM_LEGACY)]
+macro_rules! drm_legacy_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::drm_driver {
+ $( $field: $val ),*,
+ firstopen: None,
+ preclose: None,
+ dma_ioctl: None,
+ dma_quiescent: None,
+ context_dtor: None,
+ irq_handler: None,
+ irq_preinstall: None,
+ irq_postinstall: None,
+ irq_uninstall: None,
+ get_vblank_counter: None,
+ enable_vblank: None,
+ disable_vblank: None,
+ dev_priv_size: 0,
+ }
+ }
+}
+
+#[cfg(not(CONFIG_DRM_LEGACY))]
+macro_rules! drm_legacy_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::drm_driver {
+ $( $field: $val ),*
+ }
+ }
+}
+
+/// Registers a DRM device with the rest of the kernel.
+///
+/// It automatically picks up `THIS_MODULE`.
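+///
+/// # Examples
+///
+/// ```ignore
+/// // Within a driver's probe callback, assuming `reg` is a `Pin<&mut Registration<MyDriver>>`
+/// // and `data` is the driver's `Data` value (both placeholders):
+/// kernel::drm_device_register!(reg, data, 0)?;
+/// ```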
+#[allow(clippy::crate_in_macro_def)]
+#[macro_export]
+macro_rules! drm_device_register {
+ ($reg:expr, $data:expr, $flags:expr $(,)?) => {{
+ $crate::drm::drv::Registration::register($reg, $data, $flags, &crate::THIS_MODULE)
+ }};
+}
+
+impl<T: Driver> Registration<T> {
+ const VTABLE: bindings::drm_driver = drm_legacy_fields! {
+ load: None,
+ open: Some(drm::file::open_callback::<T::File>),
+ postclose: Some(drm::file::postclose_callback::<T::File>),
+ lastclose: None,
+ unload: None,
+ release: None,
+ master_set: None,
+ master_drop: None,
+ debugfs_init: None,
+ gem_create_object: T::Object::ALLOC_OPS.gem_create_object,
+ prime_handle_to_fd: T::Object::ALLOC_OPS.prime_handle_to_fd,
+ prime_fd_to_handle: T::Object::ALLOC_OPS.prime_fd_to_handle,
+ gem_prime_import: T::Object::ALLOC_OPS.gem_prime_import,
+ gem_prime_import_sg_table: T::Object::ALLOC_OPS.gem_prime_import_sg_table,
+ gem_prime_mmap: T::Object::ALLOC_OPS.gem_prime_mmap,
+ dumb_create: T::Object::ALLOC_OPS.dumb_create,
+ dumb_map_offset: T::Object::ALLOC_OPS.dumb_map_offset,
+ dumb_destroy: T::Object::ALLOC_OPS.dumb_destroy,
+
+ major: T::INFO.major,
+ minor: T::INFO.minor,
+ patchlevel: T::INFO.patchlevel,
+ name: T::INFO.name.as_char_ptr() as *mut _,
+ desc: T::INFO.desc.as_char_ptr() as *mut _,
+ date: T::INFO.date.as_char_ptr() as *mut _,
+
+ driver_features: T::FEATURES,
+ ioctls: T::IOCTLS.as_ptr(),
+ num_ioctls: T::IOCTLS.len() as i32,
+ fops: core::ptr::null_mut(),
+ };
+
+ /// Creates a new [`Registration`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new(parent: &dyn device::RawDevice) -> Result<Self> {
+ let vtable = Pin::new(Box::try_new(Self::VTABLE)?);
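+ // SAFETY: `vtable` is pinned and lives at least as long as the device it is registered
+ // with, and `parent.raw_device()` returns a valid device pointer.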
+ let raw_drm = unsafe { bindings::drm_dev_alloc(&*vtable, parent.raw_device()) };
+ let raw_drm = from_kernel_err_ptr(raw_drm)?;
+
+ // The reference count is one, and now we take ownership of that reference as a
+ // drm::device::Device.
+ let drm = unsafe { drm::device::Device::from_raw(raw_drm) };
+
+ Ok(Self {
+ drm,
+ registered: false,
+ vtable,
+ fops: drm::gem::create_fops(),
+ _pin: PhantomPinned,
+ _p: PhantomData,
+ })
+ }
+
+ /// Registers a DRM device with the rest of the kernel.
+ ///
+ /// Users are encouraged to use the [`drm_device_register!()`] macro because it automatically
+ /// picks up the current module.
+ pub fn register(
+ self: Pin<&mut Self>,
+ data: T::Data,
+ flags: usize,
+ module: &'static ThisModule,
+ ) -> Result {
+ if self.registered {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ let data_pointer = <T::Data as ForeignOwnable>::into_foreign(data);
+ // SAFETY: `drm` is valid per the type invariant
+ unsafe {
+ (*this.drm.raw_mut()).dev_private = data_pointer as *mut _;
+ }
+
+ this.fops.owner = module.0;
+ this.vtable.fops = &this.fops;
+
+ // SAFETY: The device is now initialized and ready to be registered.
+ let ret = unsafe { bindings::drm_dev_register(this.drm.raw_mut(), flags as u64) };
+ if ret < 0 {
+ // SAFETY: `data_pointer` was returned by `into_foreign` above.
+ unsafe { T::Data::from_foreign(data_pointer) };
+ return Err(Error::from_kernel_errno(ret));
+ }
+
+ this.registered = true;
+ Ok(())
+ }
+
+ /// Returns a reference to the `Device` instance for this registration.
+ pub fn device(&self) -> &drm::device::Device<T> {
+ &self.drm
+ }
+}
+
+// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between threads
+// or CPUs, so it is safe to share it.
+unsafe impl<T: Driver> Sync for Registration<T> {}
+
+// SAFETY: Registration with and unregistration from the drm subsystem can happen from any thread.
+// Additionally, `T::Data` (which is dropped during unregistration) is `Send`, so it is ok to move
+// `Registration` to different threads.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: Driver> Send for Registration<T> {}
+
+impl<T: Driver> Drop for Registration<T> {
+ /// Removes the registration from the kernel if it has completed successfully before.
+ fn drop(&mut self) {
+ if self.registered {
+ // Get a pointer to the data stored in device before destroying it.
+ // SAFETY: `drm` is valid per the type invariant
+ let data_pointer = unsafe { (*self.drm.raw_mut()).dev_private };
+
+ // SAFETY: Since `registered` is true, `self.drm` is both valid and registered.
+ unsafe { bindings::drm_dev_unregister(self.drm.raw_mut()) };
+
+ // Free data as well.
+ // SAFETY: `data_pointer` was returned by `into_foreign` during registration.
+ unsafe { <T::Data as ForeignOwnable>::from_foreign(data_pointer) };
+ }
+ }
+}
diff --git a/rust/kernel/drm/file.rs b/rust/kernel/drm/file.rs
new file mode 100644
index 000000000000..48751e93c38a
--- /dev/null
+++ b/rust/kernel/drm/file.rs
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM File objects.
+//!
+//! C header: [`include/drm/drm_file.h`](../../../../include/drm/drm_file.h)
+
+use crate::{bindings, drm, error::Result};
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::ops::Deref;
+
+/// Trait that must be implemented by DRM drivers to represent a DRM File (a client instance).
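+///
+/// # Examples
+///
+/// A sketch of a driver with empty per-client state (`MyDriver` is a placeholder):
+///
+/// ```ignore
+/// struct MyFile;
+///
+/// impl drm::file::DriverFile for MyFile {
+///     type Driver = MyDriver;
+///
+///     fn open(_device: &drm::device::Device<MyDriver>) -> Result<Box<MyFile>> {
+///         Ok(Box::try_new(MyFile)?)
+///     }
+/// }
+/// ```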
+pub trait DriverFile {
+ /// The parent `Driver` implementation for this `DriverFile`.
+ type Driver: drm::drv::Driver;
+
+ /// Open a new file (called when a client opens the DRM device).
+ fn open(device: &drm::device::Device<Self::Driver>) -> Result<Box<Self>>;
+}
+
+/// An open DRM File.
+///
+/// # Invariants
+/// `raw` is a valid pointer to a `drm_file` struct.
+#[repr(transparent)]
+pub struct File<T: DriverFile> {
+ raw: *mut bindings::drm_file,
+ _p: PhantomData<T>,
+}
+
+pub(super) unsafe extern "C" fn open_callback<T: DriverFile>(
+ raw_dev: *mut bindings::drm_device,
+ raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+ let drm = core::mem::ManuallyDrop::new(unsafe { drm::device::Device::from_raw(raw_dev) });
+ // SAFETY: This reference won't escape this function
+ let file = unsafe { &mut *raw_file };
+
+ let inner = match T::open(&drm) {
+ Err(e) => {
+ return e.to_kernel_errno();
+ }
+ Ok(i) => i,
+ };
+
+ file.driver_priv = Box::into_raw(inner) as *mut _;
+
+ 0
+}
+
+pub(super) unsafe extern "C" fn postclose_callback<T: DriverFile>(
+ _dev: *mut bindings::drm_device,
+ raw_file: *mut bindings::drm_file,
+) {
+ // SAFETY: This reference won't escape this function
+ let file = unsafe { &*raw_file };
+
+ // Drop the DriverFile
+ unsafe { Box::from_raw(file.driver_priv as *mut T) };
+}
+
+impl<T: DriverFile> File<T> {
+ // Not intended to be called externally, except via declare_drm_ioctls!()
+ #[doc(hidden)]
+ pub unsafe fn from_raw(raw_file: *mut bindings::drm_file) -> File<T> {
+ File {
+ raw: raw_file,
+ _p: PhantomData,
+ }
+ }
+
+ #[allow(dead_code)]
+ /// Return the raw pointer to the underlying `drm_file`.
+ pub(super) fn raw(&self) -> *const bindings::drm_file {
+ self.raw
+ }
+
+ /// Return an immutable reference to the raw `drm_file` structure.
+ pub(super) fn file(&self) -> &bindings::drm_file {
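+ // SAFETY: `self.raw` is valid and points to an initialized `drm_file` per the type
+ // invariant.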
+ unsafe { &*self.raw }
+ }
+}
+
+impl<T: DriverFile> Deref for File<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*(self.file().driver_priv as *const T) }
+ }
+}
+
+impl<T: DriverFile> crate::private::Sealed for File<T> {}
+
+/// Generic trait to allow users that don't care about driver specifics to accept any `File<T>`.
+///
+/// # Safety
+///
+/// Must only be implemented for `File<T>`, and the returned pointers must follow the normal
+/// invariants of that type.
+pub unsafe trait GenericFile: crate::private::Sealed {
+ /// Returns the raw const pointer to the `struct drm_file`
+ fn raw(&self) -> *const bindings::drm_file;
+ /// Returns the raw mut pointer to the `struct drm_file`
+ fn raw_mut(&mut self) -> *mut bindings::drm_file;
+}
+
+unsafe impl<T: DriverFile> GenericFile for File<T> {
+ fn raw(&self) -> *const bindings::drm_file {
+ self.raw
+ }
+ fn raw_mut(&mut self) -> *mut bindings::drm_file {
+ self.raw
+ }
+}
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
new file mode 100644
index 000000000000..c4a42bb2f718
--- /dev/null
+++ b/rust/kernel/drm/gem/mod.rs
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM GEM API
+//!
+//! C header: [`include/drm/drm_gem.h`](../../../../include/drm/drm_gem.h)
+
+#[cfg(CONFIG_DRM_GEM_SHMEM_HELPER = "y")]
+pub mod shmem;
+
+use alloc::boxed::Box;
+
+use crate::{
+ bindings,
+ drm::{device, drv, file},
+ error::{to_result, Result},
+ prelude::*,
+};
+use core::{mem, mem::ManuallyDrop, ops::Deref, ops::DerefMut};
+
+/// GEM object functions, which must be implemented by drivers.
+pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+ /// Create a new driver data object for a GEM object of a given size.
+ fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<Self>;
+
+ /// Open a new handle to an existing object, associated with a File.
+ fn open(
+ _obj: &<<T as IntoGEMObject>::Driver as drv::Driver>::Object,
+ _file: &file::File<<<T as IntoGEMObject>::Driver as drv::Driver>::File>,
+ ) -> Result {
+ Ok(())
+ }
+
+ /// Close a handle to an existing object, associated with a File.
+ fn close(
+ _obj: &<<T as IntoGEMObject>::Driver as drv::Driver>::Object,
+ _file: &file::File<<<T as IntoGEMObject>::Driver as drv::Driver>::File>,
+ ) {
+ }
+}
+
+/// Trait that represents a GEM object subtype
+pub trait IntoGEMObject: Sized + crate::private::Sealed {
+ /// Owning driver for this type
+ type Driver: drv::Driver;
+
+ /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
+ /// this owning object is valid.
+ fn gem_obj(&self) -> &bindings::drm_gem_object;
+
+ /// Returns a mutable reference to the raw `drm_gem_object` structure, which must be valid as
+ /// long as this owning object is valid.
+ fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object;
+
+ /// Converts a pointer to a `drm_gem_object` into a pointer to this type.
+ fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Self;
+}
+
+/// Trait which must be implemented by drivers using base GEM objects.
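+///
+/// # Examples
+///
+/// A sketch of a driver-private GEM object type (all names are placeholders):
+///
+/// ```ignore
+/// struct MyGemObject {
+///     // Driver-private state carried by every GEM object.
+/// }
+///
+/// impl gem::BaseDriverObject<gem::Object<MyGemObject>> for MyGemObject {
+///     fn new(_dev: &device::Device<MyDriver>, _size: usize) -> Result<MyGemObject> {
+///         Ok(MyGemObject {})
+///     }
+/// }
+///
+/// impl gem::DriverObject for MyGemObject {
+///     type Driver = MyDriver;
+/// }
+/// ```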
+pub trait DriverObject: BaseDriverObject<Object<Self>> {
+ /// Parent `Driver` for this object.
+ type Driver: drv::Driver;
+}
+
+unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
+ // SAFETY: All of our objects are Object<T>.
+ let this = crate::container_of!(obj, Object<T>, obj) as *mut Object<T>;
+
+ // SAFETY: The pointer we got has to be valid
+ unsafe { bindings::drm_gem_object_release(obj) };
+
+ // SAFETY: All of our objects are allocated via Box<>, and we're in the
+ // free callback which guarantees this object has zero remaining references,
+ // so we can drop it
+ unsafe { Box::from_raw(this) };
+}
+
+unsafe extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+ raw_obj: *mut bindings::drm_gem_object,
+ raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+ // SAFETY: The pointer we got has to be valid.
+ let file = unsafe {
+ file::File::<<<U as IntoGEMObject>::Driver as drv::Driver>::File>::from_raw(raw_file)
+ };
+ let obj =
+ <<<U as IntoGEMObject>::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(
+ raw_obj,
+ );
+
+ // SAFETY: from_gem_obj() returns a valid pointer as long as the type is
+ // correct and the raw_obj we got is valid.
+ match T::open(unsafe { &*obj }, &file) {
+ Err(e) => e.to_kernel_errno(),
+ Ok(()) => 0,
+ }
+}
+
+unsafe extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+ raw_obj: *mut bindings::drm_gem_object,
+ raw_file: *mut bindings::drm_file,
+) {
+ // SAFETY: The pointer we got has to be valid.
+ let file = unsafe {
+ file::File::<<<U as IntoGEMObject>::Driver as drv::Driver>::File>::from_raw(raw_file)
+ };
+ let obj =
+ <<<U as IntoGEMObject>::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(
+ raw_obj,
+ );
+
+ // SAFETY: from_gem_obj() returns a valid pointer as long as the type is
+ // correct and the raw_obj we got is valid.
+ T::close(unsafe { &*obj }, &file);
+}
+
+impl<T: DriverObject> IntoGEMObject for Object<T> {
+ type Driver = T::Driver;
+
+ fn gem_obj(&self) -> &bindings::drm_gem_object {
+ &self.obj
+ }
+
+ fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object {
+ &mut self.obj
+ }
+
+ fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
+ crate::container_of!(obj, Object<T>, obj) as *mut Object<T>
+ }
+}
+
+/// Base operations shared by all GEM object classes
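+///
+/// # Examples
+///
+/// Creating a userspace handle for an object and looking it up again (a sketch; `obj` and
+/// `file` are assumed to exist, and `MyGemObject` is a placeholder):
+///
+/// ```ignore
+/// let handle = obj.create_handle(&file)?;
+/// let same_obj = gem::Object::<MyGemObject>::lookup_handle(&file, handle)?;
+/// assert_eq!(same_obj.size(), obj.size());
+/// ```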
+pub trait BaseObject: IntoGEMObject {
+ /// Returns the size of the object in bytes.
+ fn size(&self) -> usize {
+ self.gem_obj().size
+ }
+
+ /// Sets the exportable flag, which controls whether the object can be exported via PRIME.
+ fn set_exportable(&mut self, exportable: bool) {
+ self.mut_gem_obj().exportable = exportable;
+ }
+
+ /// Creates a new reference to the object.
+ fn reference(&self) -> ObjectRef<Self> {
+ // SAFETY: Having a reference to an Object implies holding a GEM reference
+ unsafe {
+ bindings::drm_gem_object_get(self.gem_obj() as *const _ as *mut _);
+ }
+ ObjectRef {
+ ptr: self as *const _,
+ }
+ }
+
+ /// Creates a new handle for the object associated with a given `File`
+ /// (or returns an existing one).
+ fn create_handle(
+ &self,
+ file: &file::File<<<Self as IntoGEMObject>::Driver as drv::Driver>::File>,
+ ) -> Result<u32> {
+ let mut handle: u32 = 0;
+ // SAFETY: The arguments are all valid per the type invariants.
+ to_result(unsafe {
+ bindings::drm_gem_handle_create(
+ file.raw() as *mut _,
+ self.gem_obj() as *const _ as *mut _,
+ &mut handle,
+ )
+ })?;
+ Ok(handle)
+ }
+
+ /// Looks up an object by its handle for a given `File`.
+ fn lookup_handle(
+ file: &file::File<<<Self as IntoGEMObject>::Driver as drv::Driver>::File>,
+ handle: u32,
+ ) -> Result<ObjectRef<Self>> {
+ // SAFETY: The arguments are all valid per the type invariants.
+ let ptr = unsafe { bindings::drm_gem_object_lookup(file.raw() as *mut _, handle) };
+
+ if ptr.is_null() {
+ Err(ENOENT)
+ } else {
+ Ok(ObjectRef {
+ ptr: ptr as *const _,
+ })
+ }
+ }
+
+ /// Creates an mmap offset to map the object from userspace.
+ fn create_mmap_offset(&self) -> Result<u64> {
+ // SAFETY: The arguments are valid per the type invariant.
+ to_result(unsafe {
+ // TODO: is this threadsafe?
+ bindings::drm_gem_create_mmap_offset(self.gem_obj() as *const _ as *mut _)
+ })?;
+ Ok(unsafe {
+ bindings::drm_vma_node_offset_addr(&self.gem_obj().vma_node as *const _ as *mut _)
+ })
+ }
+}
+
+impl<T: IntoGEMObject> BaseObject for T {}
+
+/// A base GEM object.
+#[repr(C)]
+pub struct Object<T: DriverObject> {
+ obj: bindings::drm_gem_object,
+ // The DRM core ensures the Device exists as long as its objects exist, so we don't need to
+ // manage the reference count here.
+ dev: ManuallyDrop<device::Device<T::Driver>>,
+ inner: T,
+}
+
+impl<T: DriverObject> Object<T> {
+ /// The size of this object's structure.
+ pub const SIZE: usize = mem::size_of::<Self>();
+
+ const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+ free: Some(free_callback::<T>),
+ open: Some(open_callback::<T, Object<T>>),
+ close: Some(close_callback::<T, Object<T>>),
+ print_info: None,
+ export: None,
+ pin: None,
+ unpin: None,
+ get_sg_table: None,
+ vmap: None,
+ vunmap: None,
+ mmap: None,
+ vm_ops: core::ptr::null_mut(),
+ };
+
+ /// Create a new GEM object.
+ pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<UniqueObjectRef<Self>> {
+ let mut obj: Box<Self> = Box::try_new(Self {
+ // SAFETY: This struct is expected to be zero-initialized
+ obj: unsafe { mem::zeroed() },
+ // SAFETY: The drm subsystem guarantees that the drm_device will live as long as
+ // the GEM object lives, so we can conjure a reference out of thin air.
+ dev: ManuallyDrop::new(unsafe { device::Device::from_raw(dev.ptr) }),
+ inner: T::new(dev, size)?,
+ })?;
+
+ obj.obj.funcs = &Self::OBJECT_FUNCS;
+ to_result(unsafe {
+ bindings::drm_gem_object_init(dev.raw() as *mut _, &mut obj.obj, size)
+ })?;
+
+ let obj_ref = UniqueObjectRef {
+ ptr: Box::leak(obj),
+ };
+
+ Ok(obj_ref)
+ }
+
+ /// Returns the `Device` that owns this GEM object.
+ pub fn dev(&self) -> &device::Device<T::Driver> {
+ &self.dev
+ }
+}
+
+impl<T: DriverObject> crate::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> Deref for Object<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T: DriverObject> DerefMut for Object<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+impl<T: DriverObject> drv::AllocImpl for Object<T> {
+ const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
+ gem_create_object: None,
+ prime_handle_to_fd: Some(bindings::drm_gem_prime_handle_to_fd),
+ prime_fd_to_handle: Some(bindings::drm_gem_prime_fd_to_handle),
+ gem_prime_import: None,
+ gem_prime_import_sg_table: None,
+ gem_prime_mmap: Some(bindings::drm_gem_prime_mmap),
+ dumb_create: None,
+ dumb_map_offset: None,
+ dumb_destroy: None,
+ };
+}
+
+/// A reference-counted shared reference to a base GEM object.
+pub struct ObjectRef<T: IntoGEMObject> {
+ // Invariant: the pointer is valid and initialized, and this ObjectRef owns a reference to it.
+ ptr: *const T,
+}
+
+// SAFETY: GEM object references are safe to share between threads.
+unsafe impl<T: IntoGEMObject> Send for ObjectRef<T> {}
+unsafe impl<T: IntoGEMObject> Sync for ObjectRef<T> {}
+
+impl<T: IntoGEMObject> Clone for ObjectRef<T> {
+ fn clone(&self) -> Self {
+ self.reference()
+ }
+}
+
+impl<T: IntoGEMObject> Drop for ObjectRef<T> {
+ fn drop(&mut self) {
+ // SAFETY: Having an ObjectRef implies holding a GEM reference.
+ // The free callback will take care of deallocation.
+ unsafe {
+ bindings::drm_gem_object_put((*self.ptr).gem_obj() as *const _ as *mut _);
+ }
+ }
+}
+
+impl<T: IntoGEMObject> Deref for ObjectRef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The pointer is valid per the invariant
+ unsafe { &*self.ptr }
+ }
+}
+
+/// A unique reference to a base GEM object.
+pub struct UniqueObjectRef<T: IntoGEMObject> {
+ // Invariant: the pointer is valid and initialized, and this UniqueObjectRef owns the only
+ // reference to it.
+ ptr: *mut T,
+}
+
+impl<T: IntoGEMObject> UniqueObjectRef<T> {
+ /// Downgrade this reference to a shared reference.
+ pub fn into_ref(self) -> ObjectRef<T> {
+ let ptr = self.ptr as *const _;
+ core::mem::forget(self);
+
+ ObjectRef { ptr }
+ }
+}
+
+impl<T: IntoGEMObject> Drop for UniqueObjectRef<T> {
+ fn drop(&mut self) {
+ // SAFETY: Having a UniqueObjectRef implies holding a GEM
+ // reference. The free callback will take care of deallocation.
+ unsafe {
+ bindings::drm_gem_object_put((*self.ptr).gem_obj() as *const _ as *mut _);
+ }
+ }
+}
+
+impl<T: IntoGEMObject> Deref for UniqueObjectRef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The pointer is valid per the invariant
+ unsafe { &*self.ptr }
+ }
+}
+
+impl<T: IntoGEMObject> DerefMut for UniqueObjectRef<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The pointer is valid per the invariant
+ unsafe { &mut *self.ptr }
+ }
+}
+
+pub(super) fn create_fops() -> bindings::file_operations {
+ bindings::file_operations {
+ owner: core::ptr::null_mut(),
+ open: Some(bindings::drm_open),
+ release: Some(bindings::drm_release),
+ unlocked_ioctl: Some(bindings::drm_ioctl),
+ #[cfg(CONFIG_COMPAT)]
+ compat_ioctl: Some(bindings::drm_compat_ioctl),
+ #[cfg(not(CONFIG_COMPAT))]
+ compat_ioctl: None,
+ poll: Some(bindings::drm_poll),
+ read: Some(bindings::drm_read),
+ llseek: Some(bindings::noop_llseek),
+ mmap: Some(bindings::drm_gem_mmap),
+ ..Default::default()
+ }
+}
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs
new file mode 100644
index 000000000000..8f17eba0be99
--- /dev/null
+++ b/rust/kernel/drm/gem/shmem.rs
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! DRM GEM shmem helper objects
+//!
+//! C header: [`include/drm/drm_gem_shmem_helper.h`](../../../../include/drm/drm_gem_shmem_helper.h)
+
+use crate::drm::{device, drv, gem};
+use crate::{
+ error::{from_kernel_err_ptr, to_result},
+ prelude::*,
+};
+use core::{
+ marker::PhantomData,
+ mem,
+ mem::{ManuallyDrop, MaybeUninit},
+ ops::{Deref, DerefMut},
+ ptr::addr_of_mut,
+ slice,
+};
+
+use gem::BaseObject;
+
+/// Trait which must be implemented by drivers using shmem-backed GEM objects.
+pub trait DriverObject: gem::BaseDriverObject<Object<Self>> {
+ /// Parent `Driver` for this object.
+ type Driver: drv::Driver;
+}
+
+// FIXME: This is terrible and I don't know how to avoid it
+#[cfg(CONFIG_NUMA)]
+macro_rules! vm_numa_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::vm_operations_struct {
+ $( $field: $val ),*,
+ set_policy: None,
+ get_policy: None,
+ }
+ }
+}
+
+#[cfg(not(CONFIG_NUMA))]
+macro_rules! vm_numa_fields {
+ ( $($field:ident: $val:expr),* $(,)? ) => {
+ bindings::vm_operations_struct {
+ $( $field: $val ),*
+ }
+ }
+}
+
+const SHMEM_VM_OPS: bindings::vm_operations_struct = vm_numa_fields! {
+ open: Some(bindings::drm_gem_shmem_vm_open),
+ close: Some(bindings::drm_gem_shmem_vm_close),
+ may_split: None,
+ mremap: None,
+ mprotect: None,
+ fault: Some(bindings::drm_gem_shmem_fault),
+ huge_fault: None,
+ map_pages: None,
+ pagesize: None,
+ page_mkwrite: None,
+ pfn_mkwrite: None,
+ access: None,
+ name: None,
+ find_special_page: None,
+};
+
+/// A shmem-backed GEM object.
+#[repr(C)]
+pub struct Object<T: DriverObject> {
+ obj: bindings::drm_gem_shmem_object,
+ // The DRM core ensures the Device exists as long as its objects exist, so we don't need to
+ // manage the reference count here.
+ dev: ManuallyDrop<device::Device<T::Driver>>,
+ inner: T,
+}
+
+unsafe extern "C" fn gem_create_object<T: DriverObject>(
+ raw_dev: *mut bindings::drm_device,
+ size: usize,
+) -> *mut bindings::drm_gem_object {
+ // SAFETY: GEM ensures the device lives as long as its objects live,
+ // so we can conjure up a reference from thin air and never drop it.
+ let dev = ManuallyDrop::new(unsafe { device::Device::from_raw(raw_dev) });
+
+ let inner = match T::new(&*dev, size) {
+ Ok(v) => v,
+ Err(e) => return e.to_ptr(),
+ };
+
+ let p = unsafe {
+ bindings::krealloc(
+ core::ptr::null(),
+ Object::<T>::SIZE,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ ) as *mut Object<T>
+ };
+
+ if p.is_null() {
+ return ENOMEM.to_ptr();
+ }
+
+ // SAFETY: p is valid as long as the alloc succeeded
+ unsafe {
+ addr_of_mut!((*p).dev).write(dev);
+ addr_of_mut!((*p).inner).write(inner);
+ }
+
+ // SAFETY: drm_gem_shmem_object is safe to zero-init, and
+ // the rest of Object has been initialized
+ let new: &mut Object<T> = unsafe { &mut *(p as *mut _) };
+
+ new.obj.base.funcs = &Object::<T>::VTABLE;
+ &mut new.obj.base
+}
+
+unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
+ // SAFETY: All of our objects are Object<T>.
+ let p = crate::container_of!(obj, Object<T>, obj) as *mut Object<T>;
+
+ // SAFETY: p is never used after this
+ unsafe {
+ core::ptr::drop_in_place(&mut (*p).inner);
+ }
+
+ // SAFETY: This pointer has to be valid, since p is valid
+ unsafe {
+ bindings::drm_gem_shmem_free(&mut (*p).obj);
+ }
+}
+
+impl<T: DriverObject> Object<T> {
+ /// The size of this object's structure.
+ const SIZE: usize = mem::size_of::<Self>();
+
+ /// `drm_gem_object_funcs` vtable suitable for GEM shmem objects.
+ const VTABLE: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+ free: Some(free_callback::<T>),
+ open: Some(super::open_callback::<T, Object<T>>),
+ close: Some(super::close_callback::<T, Object<T>>),
+ print_info: Some(bindings::drm_gem_shmem_object_print_info),
+ export: None,
+ pin: Some(bindings::drm_gem_shmem_object_pin),
+ unpin: Some(bindings::drm_gem_shmem_object_unpin),
+ get_sg_table: Some(bindings::drm_gem_shmem_object_get_sg_table),
+ vmap: Some(bindings::drm_gem_shmem_object_vmap),
+ vunmap: Some(bindings::drm_gem_shmem_object_vunmap),
+ mmap: Some(bindings::drm_gem_shmem_object_mmap),
+ vm_ops: &SHMEM_VM_OPS,
+ };
+
+ // SAFETY: Must only be used with DRM functions that are thread-safe
+ unsafe fn mut_shmem(&self) -> *mut bindings::drm_gem_shmem_object {
+ &self.obj as *const _ as *mut _
+ }
+
+ /// Create a new shmem-backed DRM object of the given size.
+ pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<gem::UniqueObjectRef<Self>> {
+ // SAFETY: This function can be called as long as the ALLOC_OPS are set properly
+ // for this driver, and the gem_create_object is called.
+ let p = unsafe { bindings::drm_gem_shmem_create(dev.raw() as *mut _, size) };
+ // `drm_gem_shmem_create` returns an `ERR_PTR` on failure; check it before use.
+ let p = from_kernel_err_ptr(p)?;
+ let p = crate::container_of!(p, Object<T>, obj) as *mut _;
+
+ // SAFETY: The gem_create_object callback ensures this is a valid Object<T>,
+ // so we can take a unique reference to it.
+ let obj_ref = gem::UniqueObjectRef { ptr: p };
+
+ Ok(obj_ref)
+ }
+
+ /// Returns the `Device` that owns this GEM object.
+ pub fn dev(&self) -> &device::Device<T::Driver> {
+ &self.dev
+ }
+
+ /// Creates (if necessary) and returns a scatter-gather table of DMA pages for this object.
+ ///
+ /// This will pin the object in memory.
+ pub fn sg_table(&self) -> Result<SGTable<T>> {
+ // SAFETY: drm_gem_shmem_get_pages_sgt is thread-safe.
+ let sgt = from_kernel_err_ptr(unsafe {
+ bindings::drm_gem_shmem_get_pages_sgt(self.mut_shmem())
+ })?;
+
+ Ok(SGTable {
+ sgt,
+ _owner: self.reference(),
+ })
+ }
+
+ /// Creates and returns a virtual kernel memory mapping for this object.
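+ ///
+ /// # Examples
+ ///
+ /// Zeroing a buffer through a temporary kernel mapping (a sketch; `obj` is assumed to be a
+ /// shmem-backed object):
+ ///
+ /// ```ignore
+ /// let mut vmap = obj.vmap()?;
+ /// vmap.as_mut_slice().fill(0);
+ /// // The mapping is vunmapped when `vmap` is dropped.
+ /// ```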
+ pub fn vmap(&self) -> Result<VMap<T>> {
+ let mut map: MaybeUninit<bindings::iosys_map> = MaybeUninit::uninit();
+
+ // SAFETY: drm_gem_shmem_vmap is thread-safe
+ to_result(unsafe { bindings::drm_gem_shmem_vmap(self.mut_shmem(), map.as_mut_ptr()) })?;
+
+ // SAFETY: if drm_gem_shmem_vmap did not fail, map is initialized now
+ let map = unsafe { map.assume_init() };
+
+ Ok(VMap {
+ map,
+ owner: self.reference(),
+ })
+ }
+
+ /// Set the write-combine flag for this object.
+ ///
+ /// Should be called before any mappings are made.
+ pub fn set_wc(&mut self, map_wc: bool) {
+ unsafe { (*self.mut_shmem()).map_wc = map_wc };
+ }
+}
+
+impl<T: DriverObject> Deref for Object<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T: DriverObject> DerefMut for Object<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+impl<T: DriverObject> crate::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> gem::IntoGEMObject for Object<T> {
+ type Driver = T::Driver;
+
+ fn gem_obj(&self) -> &bindings::drm_gem_object {
+ &self.obj.base
+ }
+
+ fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object {
+ &mut self.obj.base
+ }
+
+ fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
+ crate::container_of!(obj, Object<T>, obj) as *mut Object<T>
+ }
+}
+
+impl<T: DriverObject> drv::AllocImpl for Object<T> {
+ const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
+ gem_create_object: Some(gem_create_object::<T>),
+ prime_handle_to_fd: Some(bindings::drm_gem_prime_handle_to_fd),
+ prime_fd_to_handle: Some(bindings::drm_gem_prime_fd_to_handle),
+ gem_prime_import: None,
+ gem_prime_import_sg_table: Some(bindings::drm_gem_shmem_prime_import_sg_table),
+ gem_prime_mmap: Some(bindings::drm_gem_prime_mmap),
+ dumb_create: Some(bindings::drm_gem_shmem_dumb_create),
+ dumb_map_offset: None,
+ dumb_destroy: None,
+ };
+}
+
+/// A virtual mapping for a shmem-backed GEM object in kernel address space.
+pub struct VMap<T: DriverObject> {
+ map: bindings::iosys_map,
+ owner: gem::ObjectRef<Object<T>>,
+}
+
+impl<T: DriverObject> VMap<T> {
+ /// Returns a const raw pointer to the start of the mapping.
+ pub fn as_ptr(&self) -> *const core::ffi::c_void {
+ // SAFETY: The shmem helpers always return non-iomem maps
+ unsafe { self.map.__bindgen_anon_1.vaddr }
+ }
+
+ /// Returns a mutable raw pointer to the start of the mapping.
+ pub fn as_mut_ptr(&mut self) -> *mut core::ffi::c_void {
+ // SAFETY: The shmem helpers always return non-iomem maps
+ unsafe { self.map.__bindgen_anon_1.vaddr }
+ }
+
+ /// Returns a byte slice view of the mapping.
+ pub fn as_slice(&self) -> &[u8] {
+ // SAFETY: The vmap maps valid memory up to the owner size
+ unsafe { slice::from_raw_parts(self.as_ptr() as *const u8, self.owner.size()) }
+ }
+
+ /// Returns a mutable byte slice view of the mapping.
+ pub fn as_mut_slice(&mut self) -> &mut [u8] {
+ // SAFETY: The vmap maps valid memory up to the owner size
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr() as *mut u8, self.owner.size()) }
+ }
+
+ /// Borrows a reference to the object that owns this virtual mapping.
+ pub fn owner(&self) -> &gem::ObjectRef<Object<T>> {
+ &self.owner
+ }
+}
+
+impl<T: DriverObject> Drop for VMap<T> {
+ fn drop(&mut self) {
+ // SAFETY: This function is thread-safe
+ unsafe {
+ bindings::drm_gem_shmem_vunmap(self.owner.mut_shmem(), &mut self.map);
+ }
+ }
+}
+
+// SAFETY: `iosys_map` objects are safe to send across threads.
+unsafe impl<T: DriverObject> Send for VMap<T> {}
+unsafe impl<T: DriverObject> Sync for VMap<T> {}
+
+/// A single scatter-gather entry, representing a span of pages in the device's DMA address space.
+///
+/// For devices not behind a standalone IOMMU, this corresponds to physical addresses.
+#[repr(transparent)]
+pub struct SGEntry(bindings::scatterlist);
+
+impl SGEntry {
+ /// Returns the starting DMA address of this span
+ pub fn dma_address(&self) -> usize {
+ (unsafe { bindings::sg_dma_address(&self.0) }) as usize
+ }
+
+ /// Returns the length of this span in bytes
+ pub fn dma_len(&self) -> usize {
+ (unsafe { bindings::sg_dma_len(&self.0) }) as usize
+ }
+}
+
+/// A scatter-gather table of DMA address spans for a GEM shmem object.
+///
+/// # Invariants
+/// `sgt` must be a valid pointer to the `sg_table`, which must correspond to the owned
+/// object in `_owner` (which ensures it remains valid).
+pub struct SGTable<T: DriverObject> {
+ sgt: *const bindings::sg_table,
+ _owner: gem::ObjectRef<Object<T>>,
+}
+
+impl<T: DriverObject> SGTable<T> {
+ /// Returns an iterator through the SGTable's entries
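+ ///
+ /// # Examples
+ ///
+ /// Walking the DMA spans of an object (a sketch; `sgt` is assumed to come from
+ /// [`Object::sg_table`]):
+ ///
+ /// ```ignore
+ /// for entry in sgt.iter() {
+ ///     pr_info!("span: {:#x} + {:#x}\n", entry.dma_address(), entry.dma_len());
+ /// }
+ /// ```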
+ pub fn iter(&'_ self) -> SGTableIter<'_> {
+ SGTableIter {
+ left: unsafe { (*self.sgt).nents } as usize,
+ sg: unsafe { (*self.sgt).sgl },
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<'a, T: DriverObject> IntoIterator for &'a SGTable<T> {
+ type Item = &'a SGEntry;
+ type IntoIter = SGTableIter<'a>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+// SAFETY: `sg_table` objects are safe to send across threads.
+unsafe impl<T: DriverObject> Send for SGTable<T> {}
+unsafe impl<T: DriverObject> Sync for SGTable<T> {}
+
+/// An iterator through `SGTable` entries.
+///
+/// # Invariants
+/// `sg` must be a valid pointer to the scatterlist, which must outlive our lifetime.
+pub struct SGTableIter<'a> {
+ sg: *mut bindings::scatterlist,
+ left: usize,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<'a> Iterator for SGTableIter<'a> {
+ type Item = &'a SGEntry;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.left == 0 {
+ None
+ } else {
+ let sg = self.sg;
+ self.sg = unsafe { bindings::sg_next(self.sg) };
+ self.left -= 1;
+ Some(unsafe { &(*(sg as *const SGEntry)) })
+ }
+ }
+}
diff --git a/rust/kernel/drm/ioctl.rs b/rust/kernel/drm/ioctl.rs
new file mode 100644
index 000000000000..10304efbd5f1
--- /dev/null
+++ b/rust/kernel/drm/ioctl.rs
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+#![allow(non_snake_case)]
+
+//! DRM IOCTL definitions.
+//!
+//! C header: [`include/drm/drm_ioctl.h`](../../../../include/drm/drm_ioctl.h)
+
+use crate::ioctl;
+
+const BASE: u32 = bindings::DRM_IOCTL_BASE as u32;
+
+/// Construct a DRM ioctl number with no argument.
+pub const fn IO(nr: u32) -> u32 {
+ ioctl::_IO(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-only argument.
+pub const fn IOR<T>(nr: u32) -> u32 {
+ ioctl::_IOR::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a write-only argument.
+pub const fn IOW<T>(nr: u32) -> u32 {
+ ioctl::_IOW::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-write argument.
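+///
+/// # Examples
+///
+/// Mirroring a hypothetical C UAPI definition
+/// `#define DRM_IOCTL_FOO_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + 0x00, struct drm_foo_get_param)`:
+///
+/// ```ignore
+/// const DRM_IOCTL_FOO_GET_PARAM: u32 =
+///     IOWR::<bindings::drm_foo_get_param>(bindings::DRM_COMMAND_BASE + 0x00);
+/// ```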
+pub const fn IOWR<T>(nr: u32) -> u32 {
+ ioctl::_IOWR::<T>(BASE, nr)
+}
+
+/// Descriptor type for DRM ioctls. Use the `declare_drm_ioctls!{}` macro to construct them.
+pub type DrmIoctlDescriptor = bindings::drm_ioctl_desc;
+
+/// This is for ioctls which are used for rendering and require that the file descriptor is
+/// either for a render node, or, if it’s a legacy/primary node, that it is authenticated.
+pub const AUTH: u32 = bindings::drm_ioctl_flags_DRM_AUTH;
+
+/// This must be set for any ioctl which can change the modeset or display state. Userspace must
+/// call the ioctl through a primary node, while it is the active master.
+///
+/// Note that read-only modeset ioctl can also be called by unauthenticated clients, or when a
+/// master is not the currently active one.
+pub const MASTER: u32 = bindings::drm_ioctl_flags_DRM_MASTER;
+
+/// Anything that could potentially wreak havoc on a master file descriptor needs to have this
+/// flag set.
+///
+/// Currently that’s only the SETMASTER and DROPMASTER ioctls, which e.g. logind can call to
+/// force a non-behaving master (display compositor) into compliance.
+///
+/// This is equivalent to callers with the SYSADMIN capability.
+pub const ROOT_ONLY: u32 = bindings::drm_ioctl_flags_DRM_ROOT_ONLY;
+
+/// Whether drm_ioctl_desc.func should be called with the DRM BKL held or not. Enforced as the
+/// default for all modern drivers, hence there should never be a need to set this flag.
+///
+/// Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the only legacy IOCTL which
+/// needs this.
+pub const UNLOCKED: u32 = bindings::drm_ioctl_flags_DRM_UNLOCKED;
+
+/// This is used for all ioctls needed for rendering only, for drivers which support render nodes.
+/// This should cover all new render drivers, and hence it should always be set for any ioctl with
+/// `AUTH` set. Note though that read-only query ioctls might have this set, but leave DRM_AUTH
+/// unset, because they do not require authentication.
+pub const RENDER_ALLOW: u32 = bindings::drm_ioctl_flags_DRM_RENDER_ALLOW;
+
+/// Declare the DRM ioctls for a driver.
+///
+/// Each entry in the list should have the form:
+///
+/// `(ioctl_number, argument_type, flags, user_callback),`
+///
+/// `argument_type` is the type name within the `bindings` crate.
+/// `user_callback` should have the following prototype:
+///
+/// ```ignore
+/// fn foo(device: &kernel::drm::device::Device<Self>,
+/// data: &mut bindings::argument_type,
+/// file: &kernel::drm::file::File<Self::File>,
+/// ) -> Result<u32>
+/// ```
+/// where `Self` is the `drm::drv::Driver` implementation these ioctls are being declared within.
+///
+/// # Examples
+///
+/// ```ignore
+/// kernel::declare_drm_ioctls! {
+/// (FOO_GET_PARAM, drm_foo_get_param, ioctl::RENDER_ALLOW, my_get_param_handler),
+/// }
+/// ```
+///
+#[macro_export]
+macro_rules! declare_drm_ioctls {
+ ( $(($cmd:ident, $struct:ident, $flags:expr, $func:expr)),* $(,)? ) => {
+ const IOCTLS: &'static [$crate::drm::ioctl::DrmIoctlDescriptor] = {
+ const _:() = {
+ let i: u32 = $crate::bindings::DRM_COMMAND_BASE;
+ // Assert that all the IOCTLs are in the right order and there are no gaps,
+ // and that the sizeof of the specified type is correct.
+ $(
+ let cmd: u32 = $crate::macros::concat_idents!($crate::bindings::DRM_IOCTL_, $cmd);
+ ::core::assert!(i == $crate::ioctl::_IOC_NR(cmd));
+ ::core::assert!(core::mem::size_of::<$crate::bindings::$struct>() == $crate::ioctl::_IOC_SIZE(cmd));
+ let i: u32 = i + 1;
+ )*
+ };
+
+ let ioctls = &[$(
+ $crate::bindings::drm_ioctl_desc {
+ cmd: $crate::macros::concat_idents!($crate::bindings::DRM_IOCTL_, $cmd) as u32,
+ func: {
+ #[allow(non_snake_case)]
+ unsafe extern "C" fn $cmd(
+ raw_dev: *mut $crate::bindings::drm_device,
+ raw_data: *mut ::core::ffi::c_void,
+ raw_file_priv: *mut $crate::bindings::drm_file,
+ ) -> core::ffi::c_int {
+ // SAFETY: We never drop this, and the DRM core ensures the device lives
+ // while callbacks are being called.
+ //
+ // FIXME: Currently there is nothing enforcing that the types of the
+ // dev/file match the current driver these ioctls are being declared
+ // for, and it's not clear how to enforce this within the type system.
+ let dev = ::core::mem::ManuallyDrop::new(unsafe {
+ $crate::drm::device::Device::from_raw(raw_dev)
+ });
+ // SAFETY: This is just the ioctl argument, which hopefully has the right type
+ // (we've done our best checking the size).
+ let data = unsafe { &mut *(raw_data as *mut $crate::bindings::$struct) };
+ // SAFETY: This is just the DRM file structure
+ let file = unsafe { $crate::drm::file::File::from_raw(raw_file_priv) };
+
+ match $func(&*dev, data, &file) {
+ Err(e) => e.to_kernel_errno(),
+ Ok(i) => i.try_into().unwrap_or($crate::error::code::ERANGE.to_kernel_errno()),
+ }
+ }
+ Some($cmd)
+ },
+ flags: $flags,
+ name: $crate::c_str!(::core::stringify!($cmd)).as_char_ptr(),
+ }
+ ),*];
+ ioctls
+ };
+ };
+}
diff --git a/rust/kernel/drm/mm.rs b/rust/kernel/drm/mm.rs
new file mode 100644
index 000000000000..83e27a7dcc7e
--- /dev/null
+++ b/rust/kernel/drm/mm.rs
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM MM range allocator
+//!
+//! C header: [`include/drm/drm_mm.h`](../../../../include/drm/drm_mm.h)
+
+use crate::{
+ bindings,
+ error::{to_result, Result},
+ str::CStr,
+ sync::{Arc, LockClassKey, LockIniter, Mutex, UniqueArc},
+ types::Opaque,
+};
+
+use alloc::boxed::Box;
+
+use core::{
+ marker::{PhantomData, PhantomPinned},
+ ops::Deref,
+ pin::Pin,
+};
+
+/// Type alias representing a DRM MM node.
+pub type Node<A, T> = Pin<Box<NodeData<A, T>>>;
+
+/// Trait which must be implemented by the inner allocator state type provided by the user.
+pub trait AllocInner<T> {
+ /// Notification that a node was dropped from the allocator.
+ fn drop_object(&mut self, _start: u64, _size: u64, _color: usize, _object: &mut T) {}
+}
+
+impl<T> AllocInner<T> for () {}
+
+/// Wrapper type for a `struct drm_mm` plus user AllocInner object.
+///
+/// # Invariants
+/// The `drm_mm` struct is valid and initialized.
+struct MmInner<A: AllocInner<T>, T>(Opaque<bindings::drm_mm>, A, PhantomData<T>);
+
+/// Represents a single allocated node in the MM allocator
+pub struct NodeData<A: AllocInner<T>, T> {
+ node: bindings::drm_mm_node,
+ mm: Arc<Mutex<MmInner<A, T>>>,
+ valid: bool,
+ /// A drm_mm_node needs to be pinned because nodes reference each other in a linked list.
+ _pin: PhantomPinned,
+ inner: T,
+}
+
+// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
+unsafe impl<A: Send + AllocInner<T>, T: Send> Send for NodeData<A, T> {}
+unsafe impl<A: Send + AllocInner<T>, T: Sync> Sync for NodeData<A, T> {}
+
+/// Available MM node insertion modes
+#[repr(u32)]
+pub enum InsertMode {
+ /// Search for the smallest hole (within the search range) that fits the desired node.
+ ///
+ /// Allocates the node from the bottom of the found hole.
+ Best = bindings::drm_mm_insert_mode_DRM_MM_INSERT_BEST,
+
+ /// Search for the lowest hole (address closest to 0, within the search range) that fits the
+ /// desired node.
+ ///
+ /// Allocates the node from the bottom of the found hole.
+ Low = bindings::drm_mm_insert_mode_DRM_MM_INSERT_LOW,
+
+ /// Search for the highest hole (address closest to U64_MAX, within the search range) that fits
+ /// the desired node.
+ ///
+ /// Allocates the node from the top of the found hole. The specified alignment for the node is
+ /// applied to the base of the node (`Node.start()`).
+ High = bindings::drm_mm_insert_mode_DRM_MM_INSERT_HIGH,
+
+ /// Search for the most recently evicted hole (within the search range) that fits the desired
+ /// node. This is appropriate for use immediately after performing an eviction scan and removing
+ /// the selected nodes to form a hole.
+ ///
+ /// Allocates the node from the bottom of the found hole.
+ Evict = bindings::drm_mm_insert_mode_DRM_MM_INSERT_EVICT,
+}
+
+/// A clonable, interlocked reference to the allocator state.
+///
+/// This is useful to perform actions on the user-supplied `AllocInner<T>` type given just a Node,
+/// without immediately taking the lock.
+#[derive(Clone)]
+pub struct InnerRef<A: AllocInner<T>, T>(Arc<Mutex<MmInner<A, T>>>);
+
+impl<A: AllocInner<T>, T> InnerRef<A, T> {
+ /// Operate on the user `AllocInner<T>` implementation, taking the lock.
+ pub fn with<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+ let mut l = self.0.lock();
+ cb(&mut l.1)
+ }
+}
+
+impl<A: AllocInner<T>, T> NodeData<A, T> {
+ /// Returns the color of the node (an opaque value)
+ pub fn color(&self) -> usize {
+ self.node.color as usize
+ }
+
+ /// Returns the start address of the node
+ pub fn start(&self) -> u64 {
+ self.node.start
+ }
+
+ /// Returns the size of the node in bytes
+ pub fn size(&self) -> u64 {
+ self.node.size
+ }
+
+ /// Operate on the user `AllocInner<T>` implementation associated with this node's allocator.
+ pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+ let mut l = self.mm.lock();
+ cb(&mut l.1)
+ }
+
+ /// Return a clonable, detached reference to the allocator inner data.
+ pub fn alloc_ref(&self) -> InnerRef<A, T> {
+ InnerRef(self.mm.clone())
+ }
+
+ /// Return a mutable reference to the inner data.
+ pub fn inner_mut(self: Pin<&mut Self>) -> &mut T {
+ // SAFETY: This is okay because inner is not structural
+ unsafe { &mut self.get_unchecked_mut().inner }
+ }
+}
+
+impl<A: AllocInner<T>, T> Deref for NodeData<A, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<A: AllocInner<T>, T> Drop for NodeData<A, T> {
+ fn drop(&mut self) {
+ if self.valid {
+ let mut guard = self.mm.lock();
+
+ // Inform the user allocator that a node is being dropped.
+ guard
+ .1
+ .drop_object(self.start(), self.size(), self.color(), &mut self.inner);
+ // SAFETY: The MM lock is still taken, so we can safely remove the node.
+ unsafe { bindings::drm_mm_remove_node(&mut self.node) };
+ }
+ }
+}
+
+/// An instance of a DRM MM range allocator.
+pub struct Allocator<A: AllocInner<T>, T> {
+ mm: Arc<Mutex<MmInner<A, T>>>,
+ _p: PhantomData<T>,
+}
+
+impl<A: AllocInner<T>, T> Allocator<A, T> {
+ /// Create a new range allocator for the given start and size range of addresses.
+ ///
+ /// The user may optionally provide an inner object representing allocator state, which will
+ /// be protected by the same lock. If not required, `()` can be used.
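+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch (assuming an `MM_LOCK_KEY` lock class static and the `c_str!` macro;
+    /// with no driver state, `()` serves as both the inner type and the per-node data):
+    ///
+    /// ```ignore
+    /// let mut alloc: Allocator<(), ()> =
+    ///     Allocator::new(0, 0x1_0000, (), c_str!("my-mm"), &MM_LOCK_KEY)?;
+    /// let node = alloc.insert_node((), 0x1000)?;
+    /// pr_info!("allocated [{:#x}..{:#x})\n", node.start(), node.start() + node.size());
+    /// ```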
+ pub fn new(
+ start: u64,
+ size: u64,
+ inner: A,
+ name: &'static CStr,
+ lock_key: &'static LockClassKey,
+ ) -> Result<Allocator<A, T>> {
+ // SAFETY: We call `Mutex::init_lock` below.
+ let mut mm: Pin<UniqueArc<Mutex<MmInner<A, T>>>> = UniqueArc::try_new(unsafe {
+ Mutex::new(MmInner(Opaque::uninit(), inner, PhantomData))
+ })?
+ .into();
+
+ mm.as_mut().init_lock(name, lock_key);
+
+ unsafe {
+ // SAFETY: The Opaque instance provides a valid pointer, and it is initialized after
+ // this call.
+ bindings::drm_mm_init(mm.lock().0.get(), start, size);
+ }
+
+ Ok(Allocator {
+ mm: mm.into(),
+ _p: PhantomData,
+ })
+ }
+
+ /// Insert a new node into the allocator of a given size.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ pub fn insert_node(&mut self, node: T, size: u64) -> Result<Node<A, T>> {
+ self.insert_node_generic(node, size, 0, 0, InsertMode::Best)
+ }
+
+ /// Insert a new node into the allocator of a given size, with configurable alignment,
+ /// color, and insertion mode.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ pub fn insert_node_generic(
+ &mut self,
+ node: T,
+ size: u64,
+ alignment: u64,
+ color: usize,
+ mode: InsertMode,
+ ) -> Result<Node<A, T>> {
+ self.insert_node_in_range(node, size, alignment, color, 0, u64::MAX, mode)
+ }
+
+ /// Insert a new node into the allocator of a given size, with configurable alignment,
+ /// color, insertion mode, and sub-range to allocate from.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ #[allow(clippy::too_many_arguments)]
+ pub fn insert_node_in_range(
+ &mut self,
+ node: T,
+ size: u64,
+ alignment: u64,
+ color: usize,
+ start: u64,
+ end: u64,
+ mode: InsertMode,
+ ) -> Result<Node<A, T>> {
+ let mut mm_node = Box::try_new(NodeData {
+ // SAFETY: This C struct should be zero-initialized.
+ node: unsafe { core::mem::zeroed() },
+ valid: false,
+ inner: node,
+ mm: self.mm.clone(),
+ _pin: PhantomPinned,
+ })?;
+
+ let guard = self.mm.lock();
+ // SAFETY: We hold the lock and all pointers are valid.
+ to_result(unsafe {
+ bindings::drm_mm_insert_node_in_range(
+ guard.0.get(),
+ &mut mm_node.node,
+ size,
+ alignment,
+ color as core::ffi::c_ulong,
+ start,
+ end,
+ mode as u32,
+ )
+ })?;
+
+ mm_node.valid = true;
+
+ Ok(Pin::from(mm_node))
+ }
+
+ /// Insert a node into the allocator at a fixed start address.
+ ///
+ /// `node` is the user `T` type data to store into the node.
+ pub fn reserve_node(
+ &mut self,
+ node: T,
+ start: u64,
+ size: u64,
+ color: usize,
+ ) -> Result<Node<A, T>> {
+ let mut mm_node = Box::try_new(NodeData {
+ // SAFETY: This C struct should be zero-initialized.
+ node: unsafe { core::mem::zeroed() },
+ valid: false,
+ inner: node,
+ mm: self.mm.clone(),
+ _pin: PhantomPinned,
+ })?;
+
+ mm_node.node.start = start;
+ mm_node.node.size = size;
+ mm_node.node.color = color as core::ffi::c_ulong;
+
+ let guard = self.mm.lock();
+ // SAFETY: We hold the lock and all pointers are valid.
+ to_result(unsafe { bindings::drm_mm_reserve_node(guard.0.get(), &mut mm_node.node) })?;
+
+ mm_node.valid = true;
+
+ Ok(Pin::from(mm_node))
+ }
+
+ /// Operate on the inner user type `A`, taking the allocator lock
+ pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+ let mut guard = self.mm.lock();
+ cb(&mut guard.1)
+ }
+}
+
+impl<A: AllocInner<T>, T> Drop for MmInner<A, T> {
+ fn drop(&mut self) {
+ // SAFETY: If the MmInner is dropped then all nodes are gone (since they hold references),
+ // so it is safe to tear down the allocator.
+ unsafe {
+ bindings::drm_mm_takedown(self.0.get());
+ }
+ }
+}
+
+// SAFETY: `MmInner` is safely `Send` if the `AllocInner` user type is `Send`.
+unsafe impl<A: Send + AllocInner<T>, T> Send for MmInner<A, T> {}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
new file mode 100644
index 000000000000..b1f182453ec1
--- /dev/null
+++ b/rust/kernel/drm/mod.rs
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM subsystem abstractions.
+
+pub mod device;
+pub mod drv;
+pub mod file;
+pub mod gem;
+pub mod ioctl;
+pub mod mm;
+pub mod sched;
+pub mod syncobj;
diff --git a/rust/kernel/drm/sched.rs b/rust/kernel/drm/sched.rs
new file mode 100644
index 000000000000..a5275cc16179
--- /dev/null
+++ b/rust/kernel/drm/sched.rs
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM Scheduler
+//!
+//! C header: [`include/drm/gpu_scheduler.h`](../../../../include/drm/gpu_scheduler.h)
+
+use crate::{
+ bindings, device,
+ dma_fence::*,
+ error::{to_result, Result},
+ prelude::*,
+ sync::{Arc, UniqueArc},
+};
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use core::ptr::addr_of_mut;
+
+/// Scheduler status after timeout recovery
+#[repr(u32)]
+pub enum Status {
+ /// Device recovered from the timeout and can execute jobs again
+ Nominal = bindings::drm_gpu_sched_stat_DRM_GPU_SCHED_STAT_NOMINAL,
+ /// Device is no longer available
+ NoDevice = bindings::drm_gpu_sched_stat_DRM_GPU_SCHED_STAT_ENODEV,
+}
+
+/// Scheduler priorities
+#[repr(i32)]
+pub enum Priority {
+ /// Low userspace priority
+ Min = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_MIN,
+ /// Normal userspace priority
+ Normal = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_NORMAL,
+ /// High userspace priority
+ High = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_HIGH,
+ /// Kernel priority (highest)
+ Kernel = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_KERNEL,
+}
+
+/// Trait to be implemented by driver job objects.
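+///
+/// A minimal sketch; the type and its comments are illustrative, and only the two methods
+/// without default implementations are provided:
+///
+/// ```ignore
+/// struct MyJob {
+///     // Per-job driver state goes here.
+/// }
+///
+/// impl JobImpl for MyJob {
+///     fn run(_job: &mut Job<Self>) -> Result<Option<Fence>> {
+///         // Submit the job to the hardware here; returning `Ok(Some(fence))` makes the
+///         // scheduler treat the job as finished once `fence` signals.
+///         Ok(None)
+///     }
+///
+///     fn timed_out(_job: &mut Job<Self>) -> Status {
+///         // Recover the GPU here, then report the device status.
+///         Status::Nominal
+///     }
+/// }
+/// ```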
+pub trait JobImpl: Sized {
+ /// Called when the scheduler is considering scheduling this job next, to get another Fence
+ /// for this job to block on. Once it returns None, run() may be called.
+ fn prepare(_job: &mut Job<Self>) -> Option<Fence> {
+ None // Equivalent to NULL function pointer
+ }
+
+ /// Called before job execution to check whether the hardware is free enough to run the job.
+ /// This can be used to implement more complex hardware resource policies than the hw_submission
+ /// limit.
+ fn can_run(_job: &mut Job<Self>) -> bool {
+ true
+ }
+
+ /// Called to execute the job once all of the dependencies have been resolved. This may be
+    /// called multiple times if timed_out() has happened and drm_sched_job_recovery() decides
+ /// to try it again.
+ fn run(job: &mut Job<Self>) -> Result<Option<Fence>>;
+
+ /// Called when a job has taken too long to execute, to trigger GPU recovery.
+ ///
+ /// This method is called in a workqueue context.
+ fn timed_out(job: &mut Job<Self>) -> Status;
+}
+
+unsafe extern "C" fn prepare_job_cb<T: JobImpl>(
+ sched_job: *mut bindings::drm_sched_job,
+ _s_entity: *mut bindings::drm_sched_entity,
+) -> *mut bindings::dma_fence {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ match T::prepare(unsafe { &mut *p }) {
+ None => core::ptr::null_mut(),
+ Some(fence) => fence.into_raw(),
+ }
+}
+
+unsafe extern "C" fn run_job_cb<T: JobImpl>(
+ sched_job: *mut bindings::drm_sched_job,
+) -> *mut bindings::dma_fence {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ match T::run(unsafe { &mut *p }) {
+ Err(e) => e.to_ptr(),
+ Ok(None) => core::ptr::null_mut(),
+ Ok(Some(fence)) => fence.into_raw(),
+ }
+}
+
+unsafe extern "C" fn can_run_job_cb<T: JobImpl>(sched_job: *mut bindings::drm_sched_job) -> bool {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ T::can_run(unsafe { &mut *p })
+}
+
+unsafe extern "C" fn timedout_job_cb<T: JobImpl>(
+ sched_job: *mut bindings::drm_sched_job,
+) -> bindings::drm_gpu_sched_stat {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ T::timed_out(unsafe { &mut *p }) as bindings::drm_gpu_sched_stat
+}
+
+unsafe extern "C" fn free_job_cb<T: JobImpl>(sched_job: *mut bindings::drm_sched_job) {
+ // SAFETY: All of our jobs are Job<T>.
+ let p = crate::container_of!(sched_job, Job<T>, job) as *mut Job<T>;
+
+ // Convert the job back to a Box and drop it
+ // SAFETY: All of our Job<T>s are created inside a box.
+ unsafe { Box::from_raw(p) };
+}
+
+/// A DRM scheduler job.
+pub struct Job<T: JobImpl> {
+ job: bindings::drm_sched_job,
+ inner: T,
+}
+
+impl<T: JobImpl> Deref for Job<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<T: JobImpl> DerefMut for Job<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+impl<T: JobImpl> Drop for Job<T> {
+ fn drop(&mut self) {
+ // SAFETY: At this point the job has either been submitted and this is being called from
+ // `free_job_cb` above, or it hasn't and it is safe to call `drm_sched_job_cleanup`.
+ unsafe { bindings::drm_sched_job_cleanup(&mut self.job) };
+ }
+}
+
+/// A pending DRM scheduler job (not yet armed)
+pub struct PendingJob<'a, T: JobImpl>(Box<Job<T>>, PhantomData<&'a T>);
+
+impl<'a, T: JobImpl> PendingJob<'a, T> {
+ /// Add a fence as a dependency to the job
+ pub fn add_dependency(&mut self, fence: Fence) -> Result {
+ to_result(unsafe {
+ bindings::drm_sched_job_add_dependency(&mut self.0.job, fence.into_raw())
+ })
+ }
+
+    /// Arm the job to make it ready for execution.
+    pub fn arm(mut self) -> ArmedJob<'a, T> {
+        // SAFETY: The job is valid, initialized, and not yet armed (arming consumes the
+        // `PendingJob`, so it can only happen once).
+        unsafe { bindings::drm_sched_job_arm(&mut self.0.job) };
+ ArmedJob(self.0, PhantomData)
+ }
+}
+
+impl<'a, T: JobImpl> Deref for PendingJob<'a, T> {
+ type Target = Job<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<'a, T: JobImpl> DerefMut for PendingJob<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+/// An armed DRM scheduler job (not yet submitted)
+pub struct ArmedJob<'a, T: JobImpl>(Box<Job<T>>, PhantomData<&'a T>);
+
+impl<'a, T: JobImpl> ArmedJob<'a, T> {
+    /// Returns the job fences.
+    pub fn fences(&self) -> JobFences<'_> {
+        // SAFETY: The job is armed, so `s_fence` points to a valid `drm_sched_fence`.
+        JobFences(unsafe { &mut *self.0.job.s_fence })
+ }
+
+ /// Push the job for execution into the scheduler
+ pub fn push(self) {
+ // After this point, the job is submitted and owned by the scheduler
+ let ptr = match self {
+ ArmedJob(job, _) => Box::<Job<T>>::into_raw(job),
+ };
+
+ // SAFETY: We are passing in ownership of a valid Box raw pointer.
+ unsafe { bindings::drm_sched_entity_push_job(addr_of_mut!((*ptr).job)) };
+ }
+}
+
+impl<'a, T: JobImpl> Deref for ArmedJob<'a, T> {
+ type Target = Job<T>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl<'a, T: JobImpl> DerefMut for ArmedJob<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+/// Reference to the bundle of fences attached to a DRM scheduler job
+pub struct JobFences<'a>(&'a mut bindings::drm_sched_fence);
+
+impl<'a> JobFences<'a> {
+ /// Returns a new reference to the job scheduled fence.
+ pub fn scheduled(&mut self) -> Fence {
+ unsafe { Fence::get_raw(&mut self.0.scheduled) }
+ }
+
+ /// Returns a new reference to the job finished fence.
+ pub fn finished(&mut self) -> Fence {
+ unsafe { Fence::get_raw(&mut self.0.finished) }
+ }
+}
+
+struct EntityInner<T: JobImpl> {
+ entity: bindings::drm_sched_entity,
+ // TODO: Allow users to share guilty flag between entities
+ sched: Arc<SchedulerInner<T>>,
+ guilty: bindings::atomic_t,
+ _p: PhantomData<T>,
+}
+
+impl<T: JobImpl> Drop for EntityInner<T> {
+ fn drop(&mut self) {
+ // SAFETY: The EntityInner is initialized. This will cancel/free all jobs.
+ unsafe { bindings::drm_sched_entity_destroy(&mut self.entity) };
+ }
+}
+
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Sync for EntityInner<T> {}
+unsafe impl<T: JobImpl> Send for EntityInner<T> {}
+
+/// A DRM scheduler entity.
+pub struct Entity<T: JobImpl>(Pin<Box<EntityInner<T>>>);
+
+impl<T: JobImpl> Entity<T> {
+ /// Create a new scheduler entity.
+ pub fn new(sched: &Scheduler<T>, priority: Priority) -> Result<Self> {
+ let mut entity: Box<MaybeUninit<EntityInner<T>>> = Box::try_new_zeroed()?;
+
+        // `drm_sched_entity_init` expects an array of scheduler pointers; we pass a
+        // single-element one.
+        let mut sched_ptr = &sched.0.sched as *const _ as *mut _;
+
+ // SAFETY: The Box is allocated above and valid.
+ unsafe {
+ bindings::drm_sched_entity_init(
+ addr_of_mut!((*entity.as_mut_ptr()).entity),
+ priority as _,
+ &mut sched_ptr,
+ 1,
+ addr_of_mut!((*entity.as_mut_ptr()).guilty),
+ )
+ };
+
+ // SAFETY: The Box is allocated above and valid.
+ unsafe { addr_of_mut!((*entity.as_mut_ptr()).sched).write(sched.0.clone()) };
+
+ // SAFETY: entity is now initialized.
+ Ok(Self(Pin::from(unsafe { entity.assume_init() })))
+ }
+
+ /// Create a new job on this entity.
+ ///
+ /// The entity must outlive the pending job until it transitions into the submitted state,
+ /// after which the scheduler owns it.
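+    ///
+    /// A possible submission flow (sketch; `MyJob` and `input_fence` are illustrative):
+    ///
+    /// ```ignore
+    /// let mut job = entity.new_job(MyJob { /* ... */ })?;
+    /// job.add_dependency(input_fence)?;
+    /// let job = job.arm();
+    /// let done_fence = job.fences().finished();
+    /// job.push();
+    /// ```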
+ pub fn new_job(&self, inner: T) -> Result<PendingJob<'_, T>> {
+ let mut job: Box<MaybeUninit<Job<T>>> = Box::try_new_zeroed()?;
+
+ // SAFETY: We hold a reference to the entity (which is a valid pointer),
+ // and the job object was just allocated above.
+ to_result(unsafe {
+ bindings::drm_sched_job_init(
+ addr_of_mut!((*job.as_mut_ptr()).job),
+ &self.0.as_ref().get_ref().entity as *const _ as *mut _,
+ core::ptr::null_mut(),
+ )
+ })?;
+
+ // SAFETY: The Box pointer is valid, and this initializes the inner member.
+ unsafe { addr_of_mut!((*job.as_mut_ptr()).inner).write(inner) };
+
+ // SAFETY: All fields of the Job<T> are now initialized.
+ Ok(PendingJob(unsafe { job.assume_init() }, PhantomData))
+ }
+}
+
+/// DRM scheduler inner data
+pub struct SchedulerInner<T: JobImpl> {
+ sched: bindings::drm_gpu_scheduler,
+ _p: PhantomData<T>,
+}
+
+impl<T: JobImpl> Drop for SchedulerInner<T> {
+ fn drop(&mut self) {
+ // SAFETY: The scheduler is valid. This assumes drm_sched_fini() will take care of
+ // freeing all in-progress jobs.
+ unsafe { bindings::drm_sched_fini(&mut self.sched) };
+ }
+}
+
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Sync for SchedulerInner<T> {}
+unsafe impl<T: JobImpl> Send for SchedulerInner<T> {}
+
+/// A DRM Scheduler
+pub struct Scheduler<T: JobImpl>(Arc<SchedulerInner<T>>);
+
+impl<T: JobImpl> Scheduler<T> {
+ const OPS: bindings::drm_sched_backend_ops = bindings::drm_sched_backend_ops {
+ prepare_job: Some(prepare_job_cb::<T>),
+ can_run_job: Some(can_run_job_cb::<T>),
+ run_job: Some(run_job_cb::<T>),
+ timedout_job: Some(timedout_job_cb::<T>),
+ free_job: Some(free_job_cb::<T>),
+    };
+
+    /// Creates a new DRM Scheduler object.
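+    ///
+    /// A construction sketch (`dev` is any [`device::RawDevice`]; `MyJob` and the numeric
+    /// values are illustrative):
+    ///
+    /// ```ignore
+    /// let sched = Scheduler::<MyJob>::new(&dev, 64, 0, 100, c_str!("my-sched"))?;
+    /// let entity = Entity::new(&sched, Priority::Normal)?;
+    /// ```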
+ // TODO: Shared timeout workqueues & scores
+ pub fn new(
+ device: &impl device::RawDevice,
+ hw_submission: u32,
+ hang_limit: u32,
+ timeout_ms: usize,
+ name: &'static CStr,
+ ) -> Result<Scheduler<T>> {
+ let mut sched: UniqueArc<MaybeUninit<SchedulerInner<T>>> = UniqueArc::try_new_uninit()?;
+
+ // SAFETY: The drm_sched pointer is valid and pinned as it was just allocated above.
+ to_result(unsafe {
+ bindings::drm_sched_init(
+ addr_of_mut!((*sched.as_mut_ptr()).sched),
+ &Self::OPS,
+ hw_submission,
+ hang_limit,
+ bindings::msecs_to_jiffies(timeout_ms.try_into()?).try_into()?,
+ core::ptr::null_mut(),
+ core::ptr::null_mut(),
+ name.as_char_ptr(),
+ device.raw_device(),
+ )
+ })?;
+
+ // SAFETY: All fields of SchedulerInner are now initialized.
+ Ok(Scheduler(unsafe { sched.assume_init() }.into()))
+ }
+}
diff --git a/rust/kernel/drm/syncobj.rs b/rust/kernel/drm/syncobj.rs
new file mode 100644
index 000000000000..10eed05eb27a
--- /dev/null
+++ b/rust/kernel/drm/syncobj.rs
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM Sync Objects
+//!
+//! C header: [`include/drm/drm_syncobj.h`](../../../../include/drm/drm_syncobj.h)
+
+use crate::{bindings, dma_fence::*, drm, error::Result, prelude::*};
+
+/// A DRM Sync Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a drm_syncobj and we own a reference to it.
+pub struct SyncObj {
+ ptr: *mut bindings::drm_syncobj,
+}
+
+impl SyncObj {
+ /// Looks up a sync object by its handle for a given `File`.
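+    ///
+    /// A sketch (`file` is the DRM file that owns `handle`):
+    ///
+    /// ```ignore
+    /// let syncobj = SyncObj::lookup_handle(&file, handle)?;
+    /// if let Some(fence) = syncobj.fence_get() {
+    ///     // Use the fence, e.g. add it as a job dependency.
+    /// }
+    /// ```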
+ pub fn lookup_handle(file: &impl drm::file::GenericFile, handle: u32) -> Result<SyncObj> {
+ // SAFETY: The arguments are all valid per the type invariants.
+ let ptr = unsafe { bindings::drm_syncobj_find(file.raw() as *mut _, handle) };
+
+ if ptr.is_null() {
+ Err(ENOENT)
+ } else {
+ Ok(SyncObj { ptr })
+ }
+ }
+
+ /// Returns the DMA fence associated with this sync object, if any.
+ pub fn fence_get(&self) -> Option<Fence> {
+        // SAFETY: `self.ptr` is valid per the type invariant.
+        let fence = unsafe { bindings::drm_syncobj_fence_get(self.ptr) };
+ if fence.is_null() {
+ None
+ } else {
+ // SAFETY: The pointer is non-NULL and drm_syncobj_fence_get acquired an
+ // additional reference.
+ Some(unsafe { Fence::from_raw(fence) })
+ }
+ }
+
+ /// Replaces the DMA fence with a new one, or removes it if fence is None.
+ pub fn replace_fence(&self, fence: Option<&Fence>) {
+        // SAFETY: `self.ptr` is valid per the type invariant, and a NULL fence pointer is
+        // allowed and removes the current fence.
+        unsafe {
+ bindings::drm_syncobj_replace_fence(
+ self.ptr,
+ fence.map_or(core::ptr::null_mut(), |a| a.raw()),
+ )
+ };
+ }
+
+ /// Adds a new timeline point to the syncobj.
+ pub fn add_point(&self, chain: FenceChain, fence: &Fence, point: u64) {
+ // SAFETY: All arguments should be valid per the respective type invariants.
+ // This takes over the FenceChain ownership.
+ unsafe { bindings::drm_syncobj_add_point(self.ptr, chain.into_raw(), fence.raw(), point) };
+ }
+}
+
+impl Drop for SyncObj {
+ fn drop(&mut self) {
+ // SAFETY: We own a reference to this syncobj.
+ unsafe { bindings::drm_syncobj_put(self.ptr) };
+ }
+}
+
+impl Clone for SyncObj {
+ fn clone(&self) -> Self {
+ // SAFETY: `ptr` is valid per the type invariant and we own a reference to it.
+ unsafe { bindings::drm_syncobj_get(self.ptr) };
+ SyncObj { ptr: self.ptr }
+ }
+}
+
+// SAFETY: drm_syncobj operations are internally locked.
+unsafe impl Sync for SyncObj {}
+unsafe impl Send for SyncObj {}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 5b9751d7ff1d..9b9f5e479207 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -4,12 +4,15 @@
//!
//! C header: [`include/uapi/asm-generic/errno-base.h`](../../../include/uapi/asm-generic/errno-base.h)
+use crate::str::CStr;
+
use alloc::{
alloc::{AllocError, LayoutError},
collections::TryReserveError,
};
use core::convert::From;
+use core::fmt;
use core::num::TryFromIntError;
use core::str::Utf8Error;
@@ -58,6 +61,117 @@ pub mod code {
declare_err!(EPIPE, "Broken pipe.");
declare_err!(EDOM, "Math argument out of domain of func.");
declare_err!(ERANGE, "Math result not representable.");
+ declare_err!(EDEADLK, "Resource deadlock would occur");
+ declare_err!(ENAMETOOLONG, "File name too long");
+ declare_err!(ENOLCK, "No record locks available");
+ declare_err!(
+ ENOSYS,
+ "Invalid system call number.",
+ "",
+ "This error code is special: arch syscall entry code will return",
+ "[`ENOSYS`] if users try to call a syscall that doesn't exist.",
+ "To keep failures of syscalls that really do exist distinguishable from",
+ "failures due to attempts to use a nonexistent syscall, syscall",
+ "implementations should refrain from returning [`ENOSYS`]."
+ );
+ declare_err!(ENOTEMPTY, "Directory not empty.");
+ declare_err!(ELOOP, "Too many symbolic links encountered.");
+ declare_err!(EWOULDBLOCK, "Operation would block.");
+ declare_err!(ENOMSG, "No message of desired type.");
+ declare_err!(EIDRM, "Identifier removed.");
+ declare_err!(ECHRNG, "Channel number out of range.");
+ declare_err!(EL2NSYNC, "Level 2 not synchronized.");
+ declare_err!(EL3HLT, "Level 3 halted.");
+ declare_err!(EL3RST, "Level 3 reset.");
+ declare_err!(ELNRNG, "Link number out of range.");
+ declare_err!(EUNATCH, "Protocol driver not attached.");
+ declare_err!(ENOCSI, "No CSI structure available.");
+ declare_err!(EL2HLT, "Level 2 halted.");
+ declare_err!(EBADE, "Invalid exchange.");
+ declare_err!(EBADR, "Invalid request descriptor.");
+ declare_err!(EXFULL, "Exchange full.");
+ declare_err!(ENOANO, "No anode.");
+ declare_err!(EBADRQC, "Invalid request code.");
+ declare_err!(EBADSLT, "Invalid slot.");
+ declare_err!(EDEADLOCK, "Resource deadlock would occur.");
+ declare_err!(EBFONT, "Bad font file format.");
+ declare_err!(ENOSTR, "Device not a stream.");
+ declare_err!(ENODATA, "No data available.");
+ declare_err!(ETIME, "Timer expired.");
+ declare_err!(ENOSR, "Out of streams resources.");
+ declare_err!(ENONET, "Machine is not on the network.");
+ declare_err!(ENOPKG, "Package not installed.");
+ declare_err!(EREMOTE, "Object is remote.");
+ declare_err!(ENOLINK, "Link has been severed.");
+ declare_err!(EADV, "Advertise error.");
+ declare_err!(ESRMNT, "Srmount error.");
+ declare_err!(ECOMM, "Communication error on send.");
+ declare_err!(EPROTO, "Protocol error.");
+ declare_err!(EMULTIHOP, "Multihop attempted.");
+ declare_err!(EDOTDOT, "RFS specific error.");
+ declare_err!(EBADMSG, "Not a data message.");
+ declare_err!(EOVERFLOW, "Value too large for defined data type.");
+ declare_err!(ENOTUNIQ, "Name not unique on network.");
+ declare_err!(EBADFD, "File descriptor in bad state.");
+ declare_err!(EREMCHG, "Remote address changed.");
+ declare_err!(ELIBACC, "Can not access a needed shared library.");
+ declare_err!(ELIBBAD, "Accessing a corrupted shared library.");
+ declare_err!(ELIBSCN, ".lib section in a.out corrupted.");
+ declare_err!(ELIBMAX, "Attempting to link in too many shared libraries.");
+ declare_err!(ELIBEXEC, "Cannot exec a shared library directly.");
+ declare_err!(EILSEQ, "Illegal byte sequence.");
+ declare_err!(ERESTART, "Interrupted system call should be restarted.");
+ declare_err!(ESTRPIPE, "Streams pipe error.");
+ declare_err!(EUSERS, "Too many users.");
+ declare_err!(ENOTSOCK, "Socket operation on non-socket.");
+ declare_err!(EDESTADDRREQ, "Destination address required.");
+ declare_err!(EMSGSIZE, "Message too long.");
+ declare_err!(EPROTOTYPE, "Protocol wrong type for socket.");
+ declare_err!(ENOPROTOOPT, "Protocol not available.");
+ declare_err!(EPROTONOSUPPORT, "Protocol not supported.");
+ declare_err!(ESOCKTNOSUPPORT, "Socket type not supported.");
+ declare_err!(EOPNOTSUPP, "Operation not supported on transport endpoint.");
+ declare_err!(EPFNOSUPPORT, "Protocol family not supported.");
+ declare_err!(EAFNOSUPPORT, "Address family not supported by protocol.");
+ declare_err!(EADDRINUSE, "Address already in use.");
+ declare_err!(EADDRNOTAVAIL, "Cannot assign requested address.");
+ declare_err!(ENETDOWN, "Network is down.");
+ declare_err!(ENETUNREACH, "Network is unreachable.");
+ declare_err!(ENETRESET, "Network dropped connection because of reset.");
+ declare_err!(ECONNABORTED, "Software caused connection abort.");
+ declare_err!(ECONNRESET, "Connection reset by peer.");
+ declare_err!(ENOBUFS, "No buffer space available.");
+ declare_err!(EISCONN, "Transport endpoint is already connected.");
+ declare_err!(ENOTCONN, "Transport endpoint is not connected.");
+ declare_err!(ESHUTDOWN, "Cannot send after transport endpoint shutdown.");
+ declare_err!(ETOOMANYREFS, "Too many references: cannot splice.");
+ declare_err!(ETIMEDOUT, "Connection timed out.");
+ declare_err!(ECONNREFUSED, "Connection refused.");
+ declare_err!(EHOSTDOWN, "Host is down.");
+ declare_err!(EHOSTUNREACH, "No route to host.");
+ declare_err!(EALREADY, "Operation already in progress.");
+ declare_err!(EINPROGRESS, "Operation now in progress.");
+ declare_err!(ESTALE, "Stale file handle.");
+ declare_err!(EUCLEAN, "Structure needs cleaning.");
+ declare_err!(ENOTNAM, "Not a XENIX named type file.");
+ declare_err!(ENAVAIL, "No XENIX semaphores available.");
+ declare_err!(EISNAM, "Is a named type file.");
+ declare_err!(EREMOTEIO, "Remote I/O error.");
+ declare_err!(EDQUOT, "Quota exceeded.");
+ declare_err!(ENOMEDIUM, "No medium found.");
+ declare_err!(EMEDIUMTYPE, "Wrong medium type.");
+ declare_err!(ECANCELED, "Operation Canceled.");
+ declare_err!(ENOKEY, "Required key not available.");
+ declare_err!(EKEYEXPIRED, "Key has expired.");
+ declare_err!(EKEYREVOKED, "Key has been revoked.");
+ declare_err!(EKEYREJECTED, "Key was rejected by service.");
+ declare_err!(EOWNERDEAD, "Owner died.", "", "For robust mutexes.");
+ declare_err!(ENOTRECOVERABLE, "State not recoverable.");
+ declare_err!(ERFKILL, "Operation not possible due to RF-kill.");
+ declare_err!(EHWPOISON, "Memory page has hardware error.");
+ declare_err!(ERESTARTSYS, "Restart the system call.");
+ declare_err!(ENOTSUPP, "Operation is not supported.");
+ declare_err!(ENOPARAM, "Parameter not supported.");
}
/// Generic integer kernel error.
@@ -72,10 +186,72 @@ pub mod code {
pub struct Error(core::ffi::c_int);
impl Error {
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// It is a bug to pass an out-of-range `errno`. `EINVAL` would
+ /// be returned in such a case.
+ pub(crate) fn from_kernel_errno(errno: core::ffi::c_int) -> Error {
+ if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+ // TODO: Make it a `WARN_ONCE` once available.
+ crate::pr_warn!(
+ "attempted to create `Error` with out of range `errno`: {}",
+ errno
+ );
+ return code::EINVAL;
+ }
+
+ // INVARIANT: The check above ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
/// Returns the kernel error code.
pub fn to_kernel_errno(self) -> core::ffi::c_int {
self.0
}
+
+ /// Returns the error encoded as a pointer.
+ #[allow(dead_code)]
+ pub(crate) fn to_ptr<T>(self) -> *mut T {
+ // SAFETY: Valid as long as self.0 is a valid error
+ unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
+ }
+
+ /// Returns a string representing the error, if one exists.
+ #[cfg(not(testlib))]
+ pub fn name(&self) -> Option<&'static CStr> {
+ // SAFETY: Just an FFI call, there are no extra safety requirements.
+ let ptr = unsafe { bindings::errname(-self.0) };
+ if ptr.is_null() {
+ None
+ } else {
+ // SAFETY: The string returned by `errname` is static and `NUL`-terminated.
+ Some(unsafe { CStr::from_char_ptr(ptr) })
+ }
+ }
+
+ /// Returns a string representing the error, if one exists.
+ ///
+ /// When `testlib` is configured, this always returns `None` to avoid the dependency on a
+ /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
+ /// run in userspace.
+ #[cfg(testlib)]
+ pub fn name(&self) -> Option<&'static CStr> {
+ None
+ }
+}
+
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.name() {
+ // Print out number if no name can be found.
+ None => f.debug_tuple("Error").field(&-self.0).finish(),
+ // SAFETY: These strings are ASCII-only.
+ Some(name) => f
+ .debug_tuple(unsafe { core::str::from_utf8_unchecked(name) })
+ .finish(),
+ }
+ }
}
impl From<AllocError> for Error {
@@ -141,3 +317,108 @@ impl From<core::convert::Infallible> for Error {
/// it should still be modeled as returning a `Result` rather than
/// just an [`Error`].
pub type Result<T = ()> = core::result::Result<T, Error>;
+
+/// Converts an integer as returned by a C kernel function to an error if it's negative, and
+/// `Ok(())` otherwise.
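+///
+/// # Examples
+///
+/// ```ignore
+/// // A sketch; `foo_enable` stands in for any C function returning 0 or a negative errno.
+/// fn enable_foo() -> Result {
+///     // SAFETY: FFI call with no additional requirements.
+///     to_result(unsafe { bindings::foo_enable() })
+/// }
+/// ```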
+pub fn to_result(err: core::ffi::c_int) -> Result {
+ if err < 0 {
+ Err(Error::from_kernel_errno(err))
+ } else {
+ Ok(())
+ }
+}
+
+/// Transform a kernel "error pointer" to a normal pointer.
+///
+/// Some kernel C API functions return an "error pointer" which optionally
+/// embeds an `errno`. Callers are supposed to check the returned pointer
+/// for errors. This function performs the check and converts the "error pointer"
+/// to a normal pointer in an idiomatic fashion.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_kernel_err_ptr;
+/// # use kernel::bindings;
+/// fn devm_platform_ioremap_resource(
+/// pdev: &mut PlatformDevice,
+/// index: u32,
+/// ) -> Result<*mut core::ffi::c_void> {
+/// // SAFETY: FFI call.
+/// unsafe {
+/// from_kernel_err_ptr(bindings::devm_platform_ioremap_resource(
+/// pdev.to_ptr(),
+/// index,
+/// ))
+/// }
+/// }
+/// ```
+// TODO: Remove `dead_code` marker once an in-kernel client is available.
+#[allow(dead_code)]
+pub(crate) fn from_kernel_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid.
+ let const_ptr: *const core::ffi::c_void = ptr.cast();
+ // SAFETY: The FFI function does not deref the pointer.
+ if unsafe { bindings::IS_ERR(const_ptr) } {
+ // SAFETY: The FFI function does not deref the pointer.
+ let err = unsafe { bindings::PTR_ERR(const_ptr) };
+ // CAST: If `IS_ERR()` returns `true`,
+ // then `PTR_ERR()` is guaranteed to return a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`,
+ // which always fits in an `i16`, as per the invariant above.
+ // And an `i16` always fits in an `i32`. So casting `err` to
+ // an `i32` can never overflow, and is always valid.
+ //
+ // SAFETY: `IS_ERR()` ensures `err` is a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`.
+ #[cfg_attr(CONFIG_ARM, allow(clippy::unnecessary_cast))]
+ return Err(Error(err as i32));
+ }
+ Ok(ptr)
+}
+
+pub(crate) fn from_kernel_result_helper<T>(r: Result<T>) -> T
+where
+ T: From<i16>,
+{
+ match r {
+ Ok(v) => v,
+ // NO-OVERFLOW: negative `errno`s are no smaller than `-bindings::MAX_ERRNO`,
+ // `-bindings::MAX_ERRNO` fits in an `i16` as per invariant above,
+ // therefore a negative `errno` always fits in an `i16` and will not overflow.
+ Err(e) => T::from(e.to_kernel_errno() as i16),
+ }
+}
+
+/// Transforms a [`crate::error::Result<T>`] to a kernel C integer result.
+///
+/// This is useful when calling Rust functions that return [`crate::error::Result<T>`]
+/// from inside `extern "C"` functions that need to return an integer
+/// error result.
+///
+/// `T` should be convertible to an `i16` via `From<i16>`.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_kernel_result;
+/// # use kernel::bindings;
+/// unsafe extern "C" fn probe_callback(
+/// pdev: *mut bindings::platform_device,
+/// ) -> core::ffi::c_int {
+/// from_kernel_result! {
+/// let ptr = devm_alloc(pdev)?;
+/// bindings::platform_set_drvdata(pdev, ptr);
+/// Ok(0)
+/// }
+/// }
+/// ```
+macro_rules! from_kernel_result {
+ ($($tt:tt)*) => {{
+ $crate::error::from_kernel_result_helper((|| {
+ $($tt)*
+ })())
+ }};
+}
+
+pub(crate) use from_kernel_result;
diff --git a/rust/kernel/io_buffer.rs b/rust/kernel/io_buffer.rs
new file mode 100644
index 000000000000..d5a258a5ff8f
--- /dev/null
+++ b/rust/kernel/io_buffer.rs
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Buffers used in IO.
+
+use crate::error::Result;
+use alloc::vec::Vec;
+use core::mem::{size_of, MaybeUninit};
+
+/// Represents a buffer to be read from during IO.
+pub trait IoBufferReader {
+ /// Returns the number of bytes left to be read from the io buffer.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ fn len(&self) -> usize;
+
+ /// Returns `true` if no data is available in the io buffer.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Reads raw data from the io buffer into a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The output buffer must be valid.
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result;
+
+ /// Reads all data remaining in the io buffer.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to mapped, readable memory.
+ fn read_all(&mut self) -> Result<Vec<u8>> {
+ let mut data = Vec::<u8>::new();
+ data.try_resize(self.len(), 0)?;
+
+ // SAFETY: The output buffer is valid as we just allocated it.
+ unsafe { self.read_raw(data.as_mut_ptr(), data.len())? };
+ Ok(data)
+ }
+
+ /// Reads a byte slice from the io buffer.
+ ///
+    /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the io buffer or
+ /// if the address does not currently point to mapped, readable memory.
+ fn read_slice(&mut self, data: &mut [u8]) -> Result {
+ // SAFETY: The output buffer is valid as it's coming from a live reference.
+ unsafe { self.read_raw(data.as_mut_ptr(), data.len()) }
+ }
+
+ /// Reads the contents of a plain old data (POD) type from the io buffer.
+ fn read<T: ReadableFromBytes>(&mut self) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ // SAFETY: The buffer is valid as it was just allocated.
+ unsafe { self.read_raw(out.as_mut_ptr() as _, size_of::<T>()) }?;
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+}
+
+/// Represents a buffer to be written to during IO.
+pub trait IoBufferWriter {
+ /// Returns the number of bytes left to be written into the io buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ fn len(&self) -> usize;
+
+ /// Returns `true` if the io buffer cannot hold any additional data.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Writes zeroes to the io buffer.
+ ///
+    /// Unlike the other write functions, `clear` will zero as much as it can and update
+ /// the writer internal state to reflect this. It will, however, return an error if it cannot
+ /// clear `len` bytes.
+ ///
+ /// For example, if a caller requests that 100 bytes be cleared but a segfault happens after
+ /// 20 bytes, then EFAULT is returned and the writer is advanced by 20 bytes.
+ fn clear(&mut self, len: usize) -> Result;
+
+ /// Writes a byte slice into the io buffer.
+ ///
+ /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the io buffer or if
+ /// the address does not currently point to mapped, writable memory.
+ fn write_slice(&mut self, data: &[u8]) -> Result {
+ // SAFETY: The input buffer is valid as it's coming from a live reference.
+ unsafe { self.write_raw(data.as_ptr(), data.len()) }
+ }
+
+ /// Writes raw data to the io buffer from a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The input buffer must be valid.
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result;
+
+ /// Writes the contents of the given data into the io buffer.
+ fn write<T: WritableToBytes>(&mut self, data: &T) -> Result {
+ // SAFETY: The input buffer is valid as it's coming from a live
+ // reference to a type that implements `WritableToBytes`.
+ unsafe { self.write_raw(data as *const T as _, size_of::<T>()) }
+ }
+}
+
+/// Specifies that a type is safely readable from byte slices.
+///
+/// Not all types can be safely read from byte slices; examples from
+/// <https://doc.rust-lang.org/reference/behavior-considered-undefined.html> include `bool`
+/// that must be either `0` or `1`, and `char` that cannot be a surrogate or above `char::MAX`.
+///
+/// # Safety
+///
+/// Implementers must ensure that the type is made up only of types that can be safely read from
+/// arbitrary byte sequences (e.g., `u32`, `u64`, etc.).
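+///
+/// # Examples
+///
+/// A sketch for a driver-defined header type (the type is illustrative):
+///
+/// ```ignore
+/// #[repr(C)]
+/// struct FwHeader {
+///     magic: u32,
+///     version: u32,
+/// }
+///
+/// // SAFETY: `FwHeader` only contains integers, for which any bit pattern is valid.
+/// unsafe impl ReadableFromBytes for FwHeader {}
+/// ```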
+pub unsafe trait ReadableFromBytes {}
+
+// SAFETY: All bit patterns are acceptable values of the types below.
+unsafe impl ReadableFromBytes for u8 {}
+unsafe impl ReadableFromBytes for u16 {}
+unsafe impl ReadableFromBytes for u32 {}
+unsafe impl ReadableFromBytes for u64 {}
+unsafe impl ReadableFromBytes for usize {}
+unsafe impl ReadableFromBytes for i8 {}
+unsafe impl ReadableFromBytes for i16 {}
+unsafe impl ReadableFromBytes for i32 {}
+unsafe impl ReadableFromBytes for i64 {}
+unsafe impl ReadableFromBytes for isize {}
+
+/// Specifies that a type is safely writable to byte slices.
+///
+/// This means that we don't read undefined values (which leads to UB) in preparation for writing
+/// to the byte slice. It also ensures that no potentially sensitive information is leaked into the
+/// byte slices.
+///
+/// # Safety
+///
+/// A type must not include padding bytes and must be fully initialised to safely implement
+/// [`WritableToBytes`] (i.e., it doesn't contain [`MaybeUninit`] fields). A composition of
+/// writable types in a structure is not necessarily writable because it may result in padding
+/// bytes.
+pub unsafe trait WritableToBytes {}
+
+// SAFETY: Initialised instances of the following types have no uninitialised portions.
+unsafe impl WritableToBytes for u8 {}
+unsafe impl WritableToBytes for u16 {}
+unsafe impl WritableToBytes for u32 {}
+unsafe impl WritableToBytes for u64 {}
+unsafe impl WritableToBytes for usize {}
+unsafe impl WritableToBytes for i8 {}
+unsafe impl WritableToBytes for i16 {}
+unsafe impl WritableToBytes for i32 {}
+unsafe impl WritableToBytes for i64 {}
+unsafe impl WritableToBytes for isize {}
diff --git a/rust/kernel/io_mem.rs b/rust/kernel/io_mem.rs
new file mode 100644
index 000000000000..5bb8800b04f5
--- /dev/null
+++ b/rust/kernel/io_mem.rs
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory-mapped IO.
+//!
+//! C header: [`include/asm-generic/io.h`](../../../../include/asm-generic/io.h)
+
+#![allow(dead_code)]
+
+use crate::{bindings, error::code::*, error::Result};
+use core::convert::TryInto;
+
+/// The type of `Resource`.
+pub enum IoResource {
+ /// i/o memory
+ Mem = bindings::IORESOURCE_MEM as _,
+}
+
+/// Represents a memory resource.
+pub struct Resource {
+ offset: bindings::resource_size_t,
+ size: bindings::resource_size_t,
+ flags: core::ffi::c_ulong,
+}
+
+impl Resource {
+ pub(crate) fn new(
+ start: bindings::resource_size_t,
+ end: bindings::resource_size_t,
+ flags: core::ffi::c_ulong,
+ ) -> Option<Self> {
+ if start == 0 {
+ return None;
+ }
+ Some(Self {
+ offset: start,
+ size: end.checked_sub(start)?.checked_add(1)?,
+ flags,
+ })
+ }
+}
+
+/// Represents a memory block of at least `SIZE` bytes.
+///
+/// # Invariants
+///
+/// `ptr` is a non-null and valid address of at least `SIZE` bytes and returned by an `ioremap`
+/// variant. `ptr` is also 8-byte aligned.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::io_mem::{IoMem, Resource};
+///
+/// fn test(res: Resource) -> Result {
+/// // Create an io mem block of at least 100 bytes.
+/// // SAFETY: No DMA operations are initiated through `mem`.
+/// let mem = unsafe { IoMem::<100>::try_new(res) }?;
+///
+/// // Read one byte from offset 10.
+/// let v = mem.readb(10);
+///
+/// // Write value to offset 20.
+/// mem.writeb(v, 20);
+///
+/// Ok(())
+/// }
+/// ```
+pub struct IoMem<const SIZE: usize> {
+ ptr: usize,
+}
+
+macro_rules! define_read {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+        /// Reads IO data from the given offset, which must be known at compile time.
+ ///
+ /// If the offset is not known at compile time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, offset: usize) -> $type_name {
+ Self::check_offset::<$type_name>(offset);
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // guarantees that the code won't build if `offset` makes the read go out of bounds
+ // (including the type size).
+ unsafe { bindings::$name(ptr as _) }
+ }
+
+ /// Reads IO data from the given offset.
+ ///
+ /// It fails if/when the offset (plus the type size) is out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
+ if !Self::offset_ok::<$type_name>(offset) {
+ return Err(EINVAL);
+ }
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // returns an error if `offset` would make the read go out of bounds (including the
+ // type size).
+ Ok(unsafe { bindings::$name(ptr as _) })
+ }
+ };
+}
+
+macro_rules! define_write {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ /// Writes IO data to the given offset, known at compile time.
+ ///
+ /// If the offset is not known at compile time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, value: $type_name, offset: usize) {
+ Self::check_offset::<$type_name>(offset);
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // guarantees that the code won't link if `offset` makes the write go out of bounds
+ // (including the type size).
+ unsafe { bindings::$name(value, ptr as _) }
+ }
+
+ /// Writes IO data to the given offset.
+ ///
+ /// It fails if/when the offset (plus the type size) is out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
+ if !Self::offset_ok::<$type_name>(offset) {
+ return Err(EINVAL);
+ }
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // returns an error if `offset` would make the write go out of bounds (including the
+ // type size).
+ unsafe { bindings::$name(value, ptr as _) };
+ Ok(())
+ }
+ };
+}
+
+impl<const SIZE: usize> IoMem<SIZE> {
+ /// Tries to create a new instance of a memory block.
+ ///
+ /// The resource described by `res` is mapped into the CPU's address space so that it can be
+ /// accessed directly. It is also consumed by this function so that it can't be mapped again
+ /// to a different address.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that either (a) the resulting interface cannot be used to initiate DMA
+ /// operations, or (b) that DMA operations initiated via the returned interface use DMA handles
+ /// allocated through the `dma` module.
+ pub unsafe fn try_new(res: Resource) -> Result<Self> {
+ // Check that the resource has at least `SIZE` bytes in it.
+ if res.size < SIZE.try_into()? {
+ return Err(EINVAL);
+ }
+
+ // To be able to check pointers at compile time based only on offsets, we need to guarantee
+        // that the base pointer is minimally aligned. So we conservatively require at least
+        // 8-byte alignment.
+ if res.offset % 8 != 0 {
+ crate::pr_err!("Physical address is not 64-bit aligned: {:x}", res.offset);
+ return Err(EDOM);
+ }
+
+ // Try to map the resource.
+ // SAFETY: Just mapping the memory range.
+ let addr = if res.flags & (bindings::IORESOURCE_MEM_NONPOSTED as core::ffi::c_ulong) != 0 {
+ unsafe { bindings::ioremap_np(res.offset, res.size as _) }
+ } else {
+ unsafe { bindings::ioremap(res.offset, res.size as _) }
+ };
+
+ if addr.is_null() {
+ Err(ENOMEM)
+ } else {
+ // INVARIANT: `addr` is non-null and was returned by `ioremap`, so it is valid. It is
+ // also 8-byte aligned because we checked it above.
+ Ok(Self { ptr: addr as usize })
+ }
+ }
+
+ #[inline]
+ const fn offset_ok<T>(offset: usize) -> bool {
+ let type_size = core::mem::size_of::<T>();
+ if let Some(end) = offset.checked_add(type_size) {
+ end <= SIZE && offset % type_size == 0
+ } else {
+ false
+ }
+ }
+
+ fn offset_ok_of_val<T: ?Sized>(offset: usize, value: &T) -> bool {
+ let value_size = core::mem::size_of_val(value);
+ let value_alignment = core::mem::align_of_val(value);
+ if let Some(end) = offset.checked_add(value_size) {
+ end <= SIZE && offset % value_alignment == 0
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ const fn check_offset<T>(offset: usize) {
+ crate::build_assert!(Self::offset_ok::<T>(offset), "IoMem offset overflow");
+ }
+
+    /// Copies a block of i/o memory into the specified buffer.
+ ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use kernel::prelude::*;
+    /// use kernel::io_mem::{IoMem, Resource};
+    ///
+    /// fn test(res: Resource) -> Result {
+    ///     // Create an i/o memory block of at least 100 bytes.
+    ///     // SAFETY: No DMA operations are initiated through `mem`.
+    ///     let mem = unsafe { IoMem::<100>::try_new(res) }?;
+ ///
+ /// let mut buffer: [u8; 32] = [0; 32];
+ ///
+    ///     // Copy 16 bytes from offset 10 of the i/o memory block into the buffer.
+ /// mem.try_memcpy_fromio(&mut buffer[..16], 10)?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn try_memcpy_fromio(&self, buffer: &mut [u8], offset: usize) -> Result {
+ if !Self::offset_ok_of_val(offset, buffer) {
+ return Err(EINVAL);
+ }
+
+ let ptr = self.ptr.wrapping_add(offset);
+
+ // SAFETY:
+ // - The type invariants guarantee that `ptr` is a valid pointer.
+ // - The bounds of `buffer` are checked with a call to `offset_ok_of_val()`.
+ unsafe {
+ bindings::memcpy_fromio(
+ buffer.as_mut_ptr() as *mut _,
+ ptr as *const _,
+ buffer.len() as _,
+ )
+ };
+ Ok(())
+ }
+
+ define_read!(readb, try_readb, u8);
+ define_read!(readw, try_readw, u16);
+ define_read!(readl, try_readl, u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ readq,
+ try_readq,
+ u64
+ );
+
+ define_read!(readb_relaxed, try_readb_relaxed, u8);
+ define_read!(readw_relaxed, try_readw_relaxed, u16);
+ define_read!(readl_relaxed, try_readl_relaxed, u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ readq_relaxed,
+ try_readq_relaxed,
+ u64
+ );
+
+ define_write!(writeb, try_writeb, u8);
+ define_write!(writew, try_writew, u16);
+ define_write!(writel, try_writel, u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ writeq,
+ try_writeq,
+ u64
+ );
+
+ define_write!(writeb_relaxed, try_writeb_relaxed, u8);
+ define_write!(writew_relaxed, try_writew_relaxed, u16);
+ define_write!(writel_relaxed, try_writel_relaxed, u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ writeq_relaxed,
+ try_writeq_relaxed,
+ u64
+ );
+}
+
+impl<const SIZE: usize> Drop for IoMem<SIZE> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariant, `self.ptr` is a value returned by a previous successful
+ // call to `ioremap`.
+ unsafe { bindings::iounmap(self.ptr as _) };
+ }
+}
diff --git a/rust/kernel/io_pgtable.rs b/rust/kernel/io_pgtable.rs
new file mode 100644
index 000000000000..4c90adefcd24
--- /dev/null
+++ b/rust/kernel/io_pgtable.rs
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IOMMU page table management
+//!
+//! C header: [`include/linux/io-pgtable.h`](../../../../include/linux/io-pgtable.h)
+
+use crate::{
+ bindings, device,
+ error::{code::*, to_result, Result},
+ types::{ForeignOwnable, ScopeGuard},
+};
+
+use core::marker::PhantomData;
+use core::mem;
+use core::num::NonZeroU64;
+
+/// Protection flags used with IOMMU mappings.
+pub mod prot {
+ /// Read access.
+ pub const READ: u32 = bindings::IOMMU_READ;
+ /// Write access.
+ pub const WRITE: u32 = bindings::IOMMU_WRITE;
+ /// Request cache coherency.
+ pub const CACHE: u32 = bindings::IOMMU_CACHE;
+ /// Request no-execute permission.
+ pub const NOEXEC: u32 = bindings::IOMMU_NOEXEC;
+ /// MMIO peripheral mapping.
+ pub const MMIO: u32 = bindings::IOMMU_MMIO;
+ /// Privileged mapping.
+ pub const PRIV: u32 = bindings::IOMMU_PRIV;
+}
+
+/// Represents a requested io_pgtable configuration.
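+///
+/// For example (illustrative values for a 16 KiB-granule table with 40-bit addressing):
+///
+/// ```ignore
+/// let config = Config {
+///     quirks: 0,
+///     pgsize_bitmap: 0x4000,
+///     ias: 40,
+///     oas: 40,
+///     coherent_walk: true,
+/// };
+/// ```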
+pub struct Config {
+ /// Quirk bitmask (type-specific).
+ pub quirks: usize,
+ /// Valid page sizes, as a bitmask of powers of two.
+ pub pgsize_bitmap: usize,
+ /// Input address space size in bits.
+ pub ias: usize,
+ /// Output address space size in bits.
+ pub oas: usize,
+ /// IOMMU uses coherent accesses for page table walks.
+ pub coherent_walk: bool,
+}
+
+/// IOMMU callbacks for TLB and page table management.
+///
+/// Users must implement this trait to perform the TLB flush actions for this IOMMU, if
+/// required.
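+///
+/// A no-op sketch for a fully coherent IOMMU that needs no flushing (illustrative):
+///
+/// ```ignore
+/// struct NoFlush;
+///
+/// impl FlushOps for NoFlush {
+///     type Data = ();
+///
+///     fn tlb_flush_all(_data: ()) {}
+///     fn tlb_flush_walk(_data: (), _iova: usize, _size: usize, _granule: usize) {}
+///     fn tlb_add_page(_data: (), _iova: usize, _granule: usize) {}
+/// }
+/// ```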
+pub trait FlushOps {
+ /// User-specified type owned by the IOPagetable that will be passed to TLB operations.
+ type Data: ForeignOwnable + Send + Sync;
+
+ /// Synchronously invalidate the entire TLB context.
+ fn tlb_flush_all(data: <Self::Data as ForeignOwnable>::Borrowed<'_>);
+
+ /// Synchronously invalidate all intermediate TLB state (sometimes referred to as the "walk
+ /// cache") for a virtual address range.
+ fn tlb_flush_walk(
+ data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+ iova: usize,
+ size: usize,
+ granule: usize,
+ );
+
+ /// Optional callback to queue up leaf TLB invalidation for a single page.
+ ///
+ /// IOMMUs that cannot batch TLB invalidation operations efficiently will typically issue
+ /// them here, but others may decide to update the iommu_iotlb_gather structure and defer
+ /// the invalidation until iommu_iotlb_sync() instead.
+ ///
+ /// TODO: Implement the gather argument for batching.
+ fn tlb_add_page(
+ data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+ iova: usize,
+ granule: usize,
+ );
+}
+
+/// Inner page table info shared across all table types.
+///
+/// # Invariants
+///
+/// - [`self.ops`] is valid and non-null.
+/// - [`self.cfg`] is the configuration that was filled in when [`self.ops`] was allocated.
+#[doc(hidden)]
+pub struct IoPageTableInner {
+ ops: *mut bindings::io_pgtable_ops,
+ cfg: bindings::io_pgtable_cfg,
+ data: *mut core::ffi::c_void,
+}
+
+/// Helper trait to get the config type for a single page table type from the union.
+pub trait GetConfig {
+ /// Returns the specific output configuration for this page table type.
+ fn cfg(iopt: &impl IoPageTable) -> &Self
+ where
+ Self: Sized;
+}
+
+/// A generic IOMMU page table
+pub trait IoPageTable: crate::private::Sealed {
+ #[doc(hidden)]
+ const FLUSH_OPS: bindings::iommu_flush_ops;
+
+ #[doc(hidden)]
+ fn new_fmt<T: FlushOps>(
+ dev: &dyn device::RawDevice,
+ format: u32,
+ config: Config,
+ data: T::Data,
+ ) -> Result<IoPageTableInner> {
+ let ptr = data.into_foreign() as *mut _;
+ let guard = ScopeGuard::new(|| {
+ // SAFETY: `ptr` came from a previous call to `into_foreign`.
+ unsafe { T::Data::from_foreign(ptr) };
+ });
+
+ let mut raw_cfg = bindings::io_pgtable_cfg {
+ quirks: config.quirks.try_into()?,
+ pgsize_bitmap: config.pgsize_bitmap.try_into()?,
+ ias: config.ias.try_into()?,
+ oas: config.oas.try_into()?,
+ coherent_walk: config.coherent_walk,
+ tlb: &Self::FLUSH_OPS,
+ iommu_dev: dev.raw_device(),
+            // SAFETY: An all-zeroes value is a valid initial state for the per-format
+            // config union.
+            __bindgen_anon_1: unsafe { mem::zeroed() },
+ };
+
+        // SAFETY: `raw_cfg` and `ptr` are valid, and `FLUSH_OPS` has a static lifetime.
+        let ops = unsafe {
+ bindings::alloc_io_pgtable_ops(format as bindings::io_pgtable_fmt, &mut raw_cfg, ptr)
+ };
+
+ if ops.is_null() {
+ return Err(EINVAL);
+ }
+
+ guard.dismiss();
+ Ok(IoPageTableInner {
+ ops,
+ cfg: raw_cfg,
+ data: ptr,
+ })
+ }
+
+ /// Map a range of pages.
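+    ///
+    /// For example, mapping eight contiguous 16 KiB pages read/write (sketch):
+    ///
+    /// ```ignore
+    /// let mapped = pgtable.map_pages(iova, paddr, 0x4000, 8, prot::READ | prot::WRITE)?;
+    /// assert_eq!(mapped, 8 * 0x4000);
+    /// ```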
+ fn map_pages(
+ &mut self,
+ iova: usize,
+ paddr: usize,
+ pgsize: usize,
+ pgcount: usize,
+ prot: u32,
+ ) -> Result<usize> {
+ let mut mapped: usize = 0;
+
+        // SAFETY: The type invariant guarantees `ops` is valid, and the C code populates
+        // `map_pages` for every supported format.
+        to_result(unsafe {
+ (*self.inner_mut().ops).map_pages.unwrap()(
+ self.inner_mut().ops,
+ iova as u64,
+ paddr as u64,
+ pgsize,
+ pgcount,
+ prot as i32,
+ bindings::GFP_KERNEL,
+ &mut mapped,
+ )
+ })?;
+
+ Ok(mapped)
+ }
+
+ /// Unmap a range of pages.
+ fn unmap_pages(
+ &mut self,
+ iova: usize,
+ pgsize: usize,
+ pgcount: usize,
+ // TODO: gather: *mut iommu_iotlb_gather,
+ ) -> usize {
+        // SAFETY: The type invariant guarantees `ops` is valid, and the C code populates
+        // `unmap_pages` for every supported format.
+        unsafe {
+ (*self.inner_mut().ops).unmap_pages.unwrap()(
+ self.inner_mut().ops,
+ iova as u64,
+ pgsize,
+ pgcount,
+ core::ptr::null_mut(),
+ )
+ }
+ }
+
+ /// Translate an IOVA to the corresponding physical address, if mapped.
+ fn iova_to_phys(&mut self, iova: usize) -> Option<NonZeroU64> {
+        // SAFETY: The type invariant guarantees `ops` is valid, and the C code populates
+        // `iova_to_phys` for every supported format.
+        NonZeroU64::new(unsafe {
+ (*self.inner_mut().ops).iova_to_phys.unwrap()(self.inner_mut().ops, iova as u64)
+ })
+ }
+
+ #[doc(hidden)]
+ fn inner_mut(&mut self) -> &mut IoPageTableInner;
+
+ #[doc(hidden)]
+ fn inner(&self) -> &IoPageTableInner;
+
+ #[doc(hidden)]
+ fn raw_cfg(&self) -> &bindings::io_pgtable_cfg {
+ &self.inner().cfg
+ }
+}
+
+// SAFETY: `IoPageTableInner` only exposes the page table through `&mut self` methods, and the
+// user data behind `data` is required to be `Send` and `Sync` by the bound on `FlushOps::Data`.
+unsafe impl Send for IoPageTableInner {}
+unsafe impl Sync for IoPageTableInner {}
+
+unsafe extern "C" fn tlb_flush_all_callback<T: FlushOps>(cookie: *mut core::ffi::c_void) {
+    // SAFETY: The cookie came from a call to `into_foreign` in `new_fmt`.
+    T::tlb_flush_all(unsafe { T::Data::borrow(cookie) });
+}
+
+unsafe extern "C" fn tlb_flush_walk_callback<T: FlushOps>(
+ iova: core::ffi::c_ulong,
+ size: usize,
+ granule: usize,
+ cookie: *mut core::ffi::c_void,
+) {
+    // SAFETY: The cookie came from a call to `into_foreign` in `new_fmt`.
+    T::tlb_flush_walk(
+ unsafe { T::Data::borrow(cookie) },
+ iova as usize,
+ size,
+ granule,
+ );
+}
+
+unsafe extern "C" fn tlb_add_page_callback<T: FlushOps>(
+ _gather: *mut bindings::iommu_iotlb_gather,
+ iova: core::ffi::c_ulong,
+ granule: usize,
+ cookie: *mut core::ffi::c_void,
+) {
+    // SAFETY: The cookie came from a call to `into_foreign` in `new_fmt`.
+    T::tlb_add_page(unsafe { T::Data::borrow(cookie) }, iova as usize, granule);
+}
+
+macro_rules! iopt_cfg {
+ ($name:ident, $field:ident, $type:ident) => {
+ /// An IOMMU page table configuration for a specific kind of pagetable.
+ pub type $name = bindings::$type;
+
+ impl GetConfig for $name {
+ fn cfg(iopt: &impl IoPageTable) -> &$name {
+                // SAFETY: The caller only requests the config variant matching the page
+                // table format, which the C code initialized during allocation.
+                unsafe { &iopt.raw_cfg().__bindgen_anon_1.$field }
+ }
+ }
+ };
+}
+
+impl GetConfig for () {
+ fn cfg(_iopt: &impl IoPageTable) -> &() {
+ &()
+ }
+}
+
+macro_rules! iopt_type {
+ ($type:ident, $cfg:ty, $fmt:ident) => {
+ /// Represents an IOPagetable of this type.
+ pub struct $type<T: FlushOps>(IoPageTableInner, PhantomData<T>);
+
+ impl<T: FlushOps> $type<T> {
+ /// Creates a new IOPagetable implementation of this type.
+ pub fn new(dev: &dyn device::RawDevice, config: Config, data: T::Data) -> Result<Self> {
+ Ok(Self(
+ <Self as IoPageTable>::new_fmt::<T>(dev, bindings::$fmt, config, data)?,
+ PhantomData,
+ ))
+ }
+
+ /// Get the configuration for this IOPagetable.
+ pub fn cfg(&self) -> &$cfg {
+ <$cfg as GetConfig>::cfg(self)
+ }
+ }
+
+ impl<T: FlushOps> crate::private::Sealed for $type<T> {}
+
+ impl<T: FlushOps> IoPageTable for $type<T> {
+ const FLUSH_OPS: bindings::iommu_flush_ops = bindings::iommu_flush_ops {
+ tlb_flush_all: Some(tlb_flush_all_callback::<T>),
+ tlb_flush_walk: Some(tlb_flush_walk_callback::<T>),
+ tlb_add_page: Some(tlb_add_page_callback::<T>),
+ };
+
+ fn inner(&self) -> &IoPageTableInner {
+ &self.0
+ }
+
+ fn inner_mut(&mut self) -> &mut IoPageTableInner {
+ &mut self.0
+ }
+ }
+
+ impl<T: FlushOps> Drop for $type<T> {
+ fn drop(&mut self) {
+ // SAFETY: The pointer is valid by the type invariant.
+ unsafe { bindings::free_io_pgtable_ops(self.0.ops) };
+
+ // Free context data.
+ //
+ // SAFETY: This matches the call to `into_foreign` from `new` in the success case.
+ unsafe { T::Data::from_foreign(self.0.data) };
+ }
+ }
+ };
+}
+
+// Ew... bindgen generates these unwieldy names for the variants of the config union.
+iopt_cfg!(
+ ARMLPAES1Cfg,
+ arm_lpae_s1_cfg,
+ io_pgtable_cfg__bindgen_ty_1__bindgen_ty_1
+);
+iopt_cfg!(
+ ARMLPAES2Cfg,
+ arm_lpae_s2_cfg,
+ io_pgtable_cfg__bindgen_ty_1__bindgen_ty_2
+);
+iopt_cfg!(
+ ARMv7SCfg,
+ arm_v7s_cfg,
+ io_pgtable_cfg__bindgen_ty_1__bindgen_ty_3
+);
+iopt_cfg!(
+ ARMMaliLPAECfg,
+ arm_mali_lpae_cfg,
+ io_pgtable_cfg__bindgen_ty_1__bindgen_ty_4
+);
+iopt_cfg!(
+ AppleDARTCfg,
+ apple_dart_cfg,
+ io_pgtable_cfg__bindgen_ty_1__bindgen_ty_5
+);
+iopt_cfg!(
+ AppleUATCfg,
+ apple_uat_cfg,
+ io_pgtable_cfg__bindgen_ty_1__bindgen_ty_6
+);
+
+iopt_type!(ARM32LPAES1, ARMLPAES1Cfg, io_pgtable_fmt_ARM_32_LPAE_S1);
+iopt_type!(ARM32LPAES2, ARMLPAES2Cfg, io_pgtable_fmt_ARM_32_LPAE_S2);
+iopt_type!(ARM64LPAES1, ARMLPAES1Cfg, io_pgtable_fmt_ARM_64_LPAE_S1);
+iopt_type!(ARM64LPAES2, ARMLPAES2Cfg, io_pgtable_fmt_ARM_64_LPAE_S2);
+iopt_type!(ARMv7S, ARMv7SCfg, io_pgtable_fmt_ARM_V7S);
+iopt_type!(ARMMaliLPAE, ARMMaliLPAECfg, io_pgtable_fmt_ARM_MALI_LPAE);
+iopt_type!(AMDIOMMUV1, (), io_pgtable_fmt_AMD_IOMMU_V1);
+iopt_type!(AppleDART, AppleDARTCfg, io_pgtable_fmt_APPLE_DART);
+iopt_type!(AppleDART2, AppleDARTCfg, io_pgtable_fmt_APPLE_DART2);
+iopt_type!(AppleUAT, AppleUATCfg, io_pgtable_fmt_APPLE_UAT);
diff --git a/rust/kernel/ioctl.rs b/rust/kernel/ioctl.rs
new file mode 100644
index 000000000000..6cd8e5738b91
--- /dev/null
+++ b/rust/kernel/ioctl.rs
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+#![allow(non_snake_case)]
+
+//! ioctl() number definitions
+//!
+//! C header: [`include/asm-generic/ioctl.h`](../../../../include/asm-generic/ioctl.h)
+
+/// Build an ioctl number, analogous to the C macro of the same name.
+const fn _IOC(dir: u32, ty: u32, nr: u32, size: usize) -> u32 {
+ core::assert!(dir <= bindings::_IOC_DIRMASK);
+ core::assert!(ty <= bindings::_IOC_TYPEMASK);
+ core::assert!(nr <= bindings::_IOC_NRMASK);
+ core::assert!(size <= (bindings::_IOC_SIZEMASK as usize));
+
+ (dir << bindings::_IOC_DIRSHIFT)
+ | (ty << bindings::_IOC_TYPESHIFT)
+ | (nr << bindings::_IOC_NRSHIFT)
+ | ((size as u32) << bindings::_IOC_SIZESHIFT)
+}
+
+/// Build an ioctl number for an argumentless ioctl.
+pub const fn _IO(ty: u32, nr: u32) -> u32 {
+ _IOC(bindings::_IOC_NONE, ty, nr, 0)
+}
+
+/// Build an ioctl number for a read-only ioctl.
+pub const fn _IOR<T>(ty: u32, nr: u32) -> u32 {
+ _IOC(bindings::_IOC_READ, ty, nr, core::mem::size_of::<T>())
+}
+
+/// Build an ioctl number for a write-only ioctl.
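+///
+/// For example (sketch; the struct and numbers are illustrative):
+///
+/// ```ignore
+/// #[repr(C)]
+/// struct SetParam {
+///     param: u32,
+///     value: u64,
+/// }
+///
+/// const MYDRV_IOCTL_SET_PARAM: u32 = _IOW::<SetParam>(b'M' as u32, 0x01);
+/// ```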
+pub const fn _IOW<T>(ty: u32, nr: u32) -> u32 {
+ _IOC(bindings::_IOC_WRITE, ty, nr, core::mem::size_of::<T>())
+}
+
+/// Build an ioctl number for a read-write ioctl.
+pub const fn _IOWR<T>(ty: u32, nr: u32) -> u32 {
+ _IOC(
+ bindings::_IOC_READ | bindings::_IOC_WRITE,
+ ty,
+ nr,
+ core::mem::size_of::<T>(),
+ )
+}
+
+/// Get the ioctl direction from an ioctl number.
+pub const fn _IOC_DIR(nr: u32) -> u32 {
+ (nr >> bindings::_IOC_DIRSHIFT) & bindings::_IOC_DIRMASK
+}
+
+/// Get the ioctl type from an ioctl number.
+pub const fn _IOC_TYPE(nr: u32) -> u32 {
+ (nr >> bindings::_IOC_TYPESHIFT) & bindings::_IOC_TYPEMASK
+}
+
+/// Get the ioctl sequence number (`nr`) from an ioctl number.
+pub const fn _IOC_NR(nr: u32) -> u32 {
+ (nr >> bindings::_IOC_NRSHIFT) & bindings::_IOC_NRMASK
+}
+
+/// Get the ioctl size from an ioctl number.
+pub const fn _IOC_SIZE(nr: u32) -> usize {
+ ((nr >> bindings::_IOC_SIZESHIFT) & bindings::_IOC_SIZEMASK) as usize
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 53040fa9e897..390f9f7b6f9c 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -13,7 +13,16 @@
#![no_std]
#![feature(allocator_api)]
-#![feature(core_ffi_c)]
+#![feature(associated_type_defaults)]
+#![feature(coerce_unsized)]
+#![feature(const_mut_refs)]
+#![feature(const_refs_to_cell)]
+#![feature(const_trait_impl)]
+#![feature(dispatch_from_dyn)]
+#![feature(duration_constants)]
+#![feature(new_uninit)]
+#![feature(receiver_trait)]
+#![feature(unsize)]
// Ensure conditional compilation based on the kernel configuration works;
// otherwise we may silently break things like initcall handling.
@@ -23,15 +32,36 @@ compile_error!("Missing kernel configuration for conditional compilation");
#[cfg(not(test))]
#[cfg(not(testlib))]
mod allocator;
+
mod build_assert;
+pub mod delay;
+pub mod device;
+#[cfg(CONFIG_DMA_SHARED_BUFFER)]
+pub mod dma_fence;
+pub mod driver;
+#[cfg(CONFIG_DRM = "y")]
+pub mod drm;
pub mod error;
+pub mod io_buffer;
+pub mod io_mem;
+pub mod io_pgtable;
+pub mod ioctl;
+pub mod module_param;
+pub mod of;
+pub mod platform;
pub mod prelude;
pub mod print;
+pub mod revocable;
+pub mod soc;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
pub mod str;
+pub mod sync;
+pub mod time;
pub mod types;
+pub mod user_ptr;
+pub mod xarray;
#[doc(hidden)]
pub use bindings;
@@ -40,6 +70,16 @@ pub use macros;
#[doc(hidden)]
pub use build_error::build_error;
+pub(crate) mod private {
+ #[allow(unreachable_pub)]
+ pub trait Sealed {}
+}
+
+/// Page size defined in terms of the [`PAGE_SHIFT`] macro from C.
+///
+/// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
+pub const PAGE_SIZE: usize = 1 << bindings::PAGE_SHIFT;
+
/// Prefix to appear before log messages printed from within the `kernel` crate.
const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
@@ -53,7 +93,7 @@ pub trait Module: Sized + Sync {
/// should do.
///
/// Equivalent to the `module_init` macro in the C API.
- fn init(module: &'static ThisModule) -> error::Result<Self>;
+ fn init(name: &'static crate::str::CStr, module: &'static ThisModule) -> error::Result<Self>;
}
/// Equivalent to `THIS_MODULE` in the C API.
@@ -73,6 +113,43 @@ impl ThisModule {
pub const unsafe fn from_ptr(ptr: *mut bindings::module) -> ThisModule {
ThisModule(ptr)
}
+
+ /// Locks the module parameters to access them.
+ ///
+ /// Returns a [`KParamGuard`] that will release the lock when dropped.
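+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch; `my_param` and its generated `read` accessor are
+    /// hypothetical:
+    ///
+    /// ```ignore
+    /// let lock = module.kernel_param_lock();
+    /// let value = my_param.read(&lock);
+    /// ```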
+ pub fn kernel_param_lock(&self) -> KParamGuard<'_> {
+ // SAFETY: `kernel_param_lock` will check if the pointer is null and
+ // use the built-in mutex in that case.
+ #[cfg(CONFIG_SYSFS)]
+ unsafe {
+ bindings::kernel_param_lock(self.0)
+ }
+
+ KParamGuard {
+ #[cfg(CONFIG_SYSFS)]
+ this_module: self,
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+/// Scoped lock on the kernel parameters of [`ThisModule`].
+///
+/// Lock will be released when this struct is dropped.
+pub struct KParamGuard<'a> {
+ #[cfg(CONFIG_SYSFS)]
+ this_module: &'a ThisModule,
+ phantom: core::marker::PhantomData<&'a ()>,
+}
+
+#[cfg(CONFIG_SYSFS)]
+impl<'a> Drop for KParamGuard<'a> {
+ fn drop(&mut self) {
+ // SAFETY: `kernel_param_lock` will check if the pointer is null and
+ // use the built-in mutex in that case. The existence of `self`
+ // guarantees that the lock is held.
+ unsafe { bindings::kernel_param_unlock(self.this_module.0) }
+ }
}
#[cfg(not(any(testlib, test)))]
@@ -85,3 +162,66 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
// instead of `!`. See <https://github.com/rust-lang/rust-bindgen/issues/2094>.
loop {}
}
+
+/// Calculates the offset of a field from the beginning of the struct it belongs to.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::offset_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// assert_eq!(offset_of!(Test, b), 8);
+/// ```
+#[macro_export]
+macro_rules! offset_of {
+ ($type:ty, $($f:tt)*) => {{
+ let tmp = core::mem::MaybeUninit::<$type>::uninit();
+ let outer = tmp.as_ptr();
+ // To avoid warnings when nesting `unsafe` blocks.
+ #[allow(unused_unsafe)]
+ // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of` ensures that
+ // we don't actually read from `outer` (which would be UB) nor create an intermediate
+ // reference.
+ let inner = unsafe { core::ptr::addr_of!((*outer).$($f)*) } as *const u8;
+ // To avoid warnings when nesting `unsafe` blocks.
+ #[allow(unused_unsafe)]
+ // SAFETY: The two pointers are within the same allocation block.
+ unsafe { inner.offset_from(outer as *const u8) }
+ }}
+}
+
+/// Produces a pointer to an object from a pointer to one of its fields.
+///
+/// # Safety
+///
+/// Callers must ensure that the pointer to the field is in fact a pointer to the specified field,
+/// as opposed to a pointer to another object of the same type. If this condition is not met,
+/// any dereference of the resulting pointer is UB.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::container_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// let test = Test { a: 10, b: 20 };
+/// let b_ptr = &test.b;
+/// let test_alias = container_of!(b_ptr, Test, b);
+/// assert!(core::ptr::eq(&test, test_alias));
+/// ```
+#[macro_export]
+macro_rules! container_of {
+ ($ptr:expr, $type:ty, $($f:tt)*) => {{
+ let ptr = $ptr as *const _ as *const u8;
+ let offset = $crate::offset_of!($type, $($f)*);
+ ptr.wrapping_offset(-offset) as *const $type
+ }}
+}
diff --git a/rust/kernel/module_param.rs b/rust/kernel/module_param.rs
new file mode 100644
index 000000000000..d587f1036349
--- /dev/null
+++ b/rust/kernel/module_param.rs
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Types for module parameters.
+//!
+//! C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+
+use crate::error::{code::*, from_kernel_result};
+use crate::str::{CStr, Formatter};
+use core::fmt::Write;
+
+/// Types that can be used for module parameters.
+///
+/// Note that displaying the type in `sysfs` will fail if
+/// [`alloc::string::ToString::to_string`] (as implemented through the
+/// [`core::fmt::Display`] trait) writes more than [`PAGE_SIZE`]
+/// bytes (including an additional null terminator).
+///
+/// [`PAGE_SIZE`]: `crate::PAGE_SIZE`
+pub trait ModuleParam: core::fmt::Display + core::marker::Sized {
+ /// The `ModuleParam` will be used by the kernel module through this type.
+ ///
+ /// This may differ from `Self` if, for example, `Self` needs to track
+ /// ownership without exposing it or allocate extra space for other possible
+ /// parameter values. See [`StringParam`] or [`ArrayParam`] for examples.
+ type Value: ?Sized;
+
+ /// Whether the parameter is allowed to be set without an argument.
+ ///
+ /// Setting this to `true` allows the parameter to be passed without an
+ /// argument (e.g. just `module.param` instead of `module.param=foo`).
+ const NOARG_ALLOWED: bool;
+
+ /// Convert a parameter argument into the parameter value.
+ ///
+ /// `None` should be returned when parsing of the argument fails.
+ /// `arg == None` indicates that the parameter was passed without an
+ /// argument. If `NOARG_ALLOWED` is set to `false` then `arg` is guaranteed
+ /// to always be `Some(_)`.
+ ///
+ /// Parameters passed at boot time will be set before [`kmalloc`] is
+ /// available (even if the module is loaded at a later time). However, in
+ /// this case, the argument buffer will be valid for the entire lifetime of
+ /// the kernel. So implementations of this method which need to allocate
+ /// should first check that the allocator is available (with
+ /// [`crate::bindings::slab_is_available`]) and when it is not available
+ /// provide an alternative implementation which doesn't allocate. In cases
+ /// where the allocator is not available it is safe to save references to
+ /// `arg` in `Self`, but in other cases a copy should be made.
+ ///
+ /// [`kmalloc`]: ../../../include/linux/slab.h
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self>;
+
+ /// Get the current value of the parameter for use in the kernel module.
+ ///
+ /// This function should not be used directly. Instead use the wrapper
+ /// `read` which will be generated by [`macros::module`].
+ fn value(&self) -> &Self::Value;
+
+ /// Set the module parameter from a string.
+ ///
+ /// Used to set the parameter value when loading the module or when set
+ /// through `sysfs`.
+ ///
+ /// # Safety
+ ///
+ /// If `val` is non-null then it must point to a valid null-terminated
+ /// string. The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn set_param(
+ val: *const core::ffi::c_char,
+ param: *const crate::bindings::kernel_param,
+ ) -> core::ffi::c_int {
+ let arg = if val.is_null() {
+ None
+ } else {
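+            // SAFETY: Per this function's safety contract, a non-null `val` points to a
+            // valid null-terminated string.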
+ Some(unsafe { CStr::from_char_ptr(val).as_bytes() })
+ };
+ match Self::try_from_param_arg(arg) {
+ Some(new_value) => {
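+                // SAFETY: Per this function's safety contract, `param.arg` is an
+                // initialised instance of `Self`, so it is valid to replace in place.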
+ let old_value = unsafe { (*param).__bindgen_anon_1.arg as *mut Self };
+ let _ = unsafe { core::ptr::replace(old_value, new_value) };
+ 0
+ }
+ None => EINVAL.to_kernel_errno(),
+ }
+ }
+
+ /// Write a string representation of the current parameter value to `buf`.
+ ///
+ /// Used for displaying the current parameter value in `sysfs`.
+ ///
+ /// # Safety
+ ///
+ /// `buf` must be a buffer of length at least `kernel::PAGE_SIZE` that is
+ /// writeable. The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn get_param(
+ buf: *mut core::ffi::c_char,
+ param: *const crate::bindings::kernel_param,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+            // SAFETY: The C contract guarantees that the buffer is at least `PAGE_SIZE` bytes.
+ let mut f = unsafe { Formatter::from_buffer(buf.cast(), crate::PAGE_SIZE) };
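+            // SAFETY: Per this function's safety contract, `param.arg` is an instance of `Self`.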
+ unsafe { write!(f, "{}\0", *((*param).__bindgen_anon_1.arg as *mut Self)) }?;
+ Ok(f.bytes_written().try_into()?)
+ }
+ }
+
+ /// Drop the parameter.
+ ///
+ /// Called when unloading a module.
+ ///
+ /// # Safety
+ ///
+ /// The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn free(arg: *mut core::ffi::c_void) {
+ unsafe { core::ptr::drop_in_place(arg as *mut Self) };
+ }
+}
+
+/// Trait for parsing integers.
+///
+/// Strings beginning with `0x`, `0o`, or `0b` are parsed as hex, octal, or
+/// binary respectively. Other strings beginning with `0` are parsed as
+/// octal. Anything else is parsed as decimal. A leading `+` or `-` is also
+/// permitted. The string may contain a trailing newline. Any string parsed
+/// by [`kstrtol()`] or [`kstrtoul()`] will be successfully parsed.
+///
+/// [`kstrtol()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtol
+/// [`kstrtoul()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtoul
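+///
+/// # Examples
+///
+/// An illustrative sketch of the accepted forms:
+///
+/// ```ignore
+/// assert_eq!(<u32 as ParseInt>::from_str("0x10"), Some(16));
+/// assert_eq!(<u32 as ParseInt>::from_str("010"), Some(8));
+/// assert_eq!(<i32 as ParseInt>::from_str("-5\n"), Some(-5));
+/// ```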
+trait ParseInt: Sized {
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError>;
+ fn checked_neg(self) -> Option<Self>;
+
+ fn from_str_unsigned(src: &str) -> Result<Self, core::num::ParseIntError> {
+ let src = src.strip_suffix('\n').unwrap_or(src);
+ let (radix, digits) = if let Some(n) = src.strip_prefix("0x") {
+ (16, n)
+ } else if let Some(n) = src.strip_prefix("0X") {
+ (16, n)
+ } else if let Some(n) = src.strip_prefix("0o") {
+ (8, n)
+ } else if let Some(n) = src.strip_prefix("0O") {
+ (8, n)
+ } else if let Some(n) = src.strip_prefix("0b") {
+ (2, n)
+ } else if let Some(n) = src.strip_prefix("0B") {
+ (2, n)
+ } else if src.starts_with('0') {
+ (8, src)
+ } else {
+ (10, src)
+ };
+ Self::from_str_radix(digits, radix)
+ }
+
+ fn from_str(src: &str) -> Option<Self> {
+ match src.bytes().next() {
+ None => None,
+ Some(b'-') => Self::from_str_unsigned(&src[1..]).ok()?.checked_neg(),
+ Some(b'+') => Some(Self::from_str_unsigned(&src[1..]).ok()?),
+ Some(_) => Some(Self::from_str_unsigned(src).ok()?),
+ }
+ }
+}
+
+macro_rules! impl_parse_int {
+ ($ty:ident) => {
+ impl ParseInt for $ty {
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError> {
+ $ty::from_str_radix(src, radix)
+ }
+
+ fn checked_neg(self) -> Option<Self> {
+ self.checked_neg()
+ }
+ }
+ };
+}
+
+impl_parse_int!(i8);
+impl_parse_int!(u8);
+impl_parse_int!(i16);
+impl_parse_int!(u16);
+impl_parse_int!(i32);
+impl_parse_int!(u32);
+impl_parse_int!(i64);
+impl_parse_int!(u64);
+impl_parse_int!(isize);
+impl_parse_int!(usize);
+
+macro_rules! impl_module_param {
+ ($ty:ident) => {
+ impl ModuleParam for $ty {
+ type Value = $ty;
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ let bytes = arg?;
+ let utf8 = core::str::from_utf8(bytes).ok()?;
+ <$ty as crate::module_param::ParseInt>::from_str(utf8)
+ }
+
+ fn value(&self) -> &Self::Value {
+ self
+ }
+ }
+ };
+}
+
+#[doc(hidden)]
+#[macro_export]
+/// Generate a static [`kernel_param_ops`](../../../include/linux/moduleparam.h) struct.
+///
+/// # Examples
+///
+/// ```ignore
+/// make_param_ops!(
+/// /// Documentation for new param ops.
+/// PARAM_OPS_MYTYPE, // Name for the static.
+/// MyType // A type which implements [`ModuleParam`].
+/// );
+/// ```
+macro_rules! make_param_ops {
+ ($ops:ident, $ty:ty) => {
+ $crate::make_param_ops!(
+ #[doc=""]
+ $ops,
+ $ty
+ );
+ };
+ ($(#[$meta:meta])* $ops:ident, $ty:ty) => {
+ $(#[$meta])*
+ ///
+ /// Static [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// struct generated by [`make_param_ops`].
+ pub static $ops: $crate::bindings::kernel_param_ops = $crate::bindings::kernel_param_ops {
+ flags: if <$ty as $crate::module_param::ModuleParam>::NOARG_ALLOWED {
+ $crate::bindings::KERNEL_PARAM_OPS_FL_NOARG
+ } else {
+ 0
+ },
+ set: Some(<$ty as $crate::module_param::ModuleParam>::set_param),
+ get: Some(<$ty as $crate::module_param::ModuleParam>::get_param),
+ free: Some(<$ty as $crate::module_param::ModuleParam>::free),
+ };
+ };
+}
+
+impl_module_param!(i8);
+impl_module_param!(u8);
+impl_module_param!(i16);
+impl_module_param!(u16);
+impl_module_param!(i32);
+impl_module_param!(u32);
+impl_module_param!(i64);
+impl_module_param!(u64);
+impl_module_param!(isize);
+impl_module_param!(usize);
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i8`].
+ PARAM_OPS_I8,
+ i8
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u8`].
+ PARAM_OPS_U8,
+ u8
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i16`].
+ PARAM_OPS_I16,
+ i16
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u16`].
+ PARAM_OPS_U16,
+ u16
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i32`].
+ PARAM_OPS_I32,
+ i32
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u32`].
+ PARAM_OPS_U32,
+ u32
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i64`].
+ PARAM_OPS_I64,
+ i64
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u64`].
+ PARAM_OPS_U64,
+ u64
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`isize`].
+ PARAM_OPS_ISIZE,
+ isize
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`usize`].
+ PARAM_OPS_USIZE,
+ usize
+);
+
+impl ModuleParam for bool {
+ type Value = bool;
+
+ const NOARG_ALLOWED: bool = true;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ match arg {
+ None => Some(true),
+ Some(b"y") | Some(b"Y") | Some(b"1") | Some(b"true") => Some(true),
+ Some(b"n") | Some(b"N") | Some(b"0") | Some(b"false") => Some(false),
+ _ => None,
+ }
+ }
+
+ fn value(&self) -> &Self::Value {
+ self
+ }
+}
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`bool`].
+ PARAM_OPS_BOOL,
+ bool
+);
+
+/// An array of at __most__ `N` values.
+///
+/// # Invariant
+///
+/// The first `self.used` elements of `self.values` are initialized.
+pub struct ArrayParam<T, const N: usize> {
+ values: [core::mem::MaybeUninit<T>; N],
+ used: usize,
+}
+
+impl<T, const N: usize> ArrayParam<T, { N }> {
+ fn values(&self) -> &[T] {
+ // SAFETY: The invariant maintained by `ArrayParam` allows us to cast
+ // the first `self.used` elements to `T`.
+ unsafe {
+ &*(&self.values[0..self.used] as *const [core::mem::MaybeUninit<T>] as *const [T])
+ }
+ }
+}
+
+impl<T: Copy, const N: usize> ArrayParam<T, { N }> {
+ const fn new() -> Self {
+ // INVARIANT: The first `self.used` elements of `self.values` are
+ // initialized.
+ ArrayParam {
+ values: [core::mem::MaybeUninit::uninit(); N],
+ used: 0,
+ }
+ }
+
+ const fn push(&mut self, val: T) {
+ if self.used < N {
+ // INVARIANT: The first `self.used` elements of `self.values` are
+ // initialized.
+ self.values[self.used] = core::mem::MaybeUninit::new(val);
+ self.used += 1;
+ }
+ }
+
+ /// Create an instance of `ArrayParam` initialized with `vals`.
+ ///
+    /// This function is only meant to be used in the [`macros::module`] macro.
+ pub const fn create(vals: &[T]) -> Self {
+ let mut result = ArrayParam::new();
+ let mut i = 0;
+ while i < vals.len() {
+ result.push(vals[i]);
+ i += 1;
+ }
+ result
+ }
+}
+
+impl<T: core::fmt::Display, const N: usize> core::fmt::Display for ArrayParam<T, { N }> {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ for val in self.values() {
+ write!(f, "{},", val)?;
+ }
+ Ok(())
+ }
+}
+
+impl<T: Copy + core::fmt::Display + ModuleParam, const N: usize> ModuleParam
+ for ArrayParam<T, { N }>
+{
+ type Value = [T];
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ arg.and_then(|args| {
+ let mut result = Self::new();
+ for arg in args.split(|b| *b == b',') {
+ result.push(T::try_from_param_arg(Some(arg))?);
+ }
+ Some(result)
+ })
+ }
+
+ fn value(&self) -> &Self::Value {
+ self.values()
+ }
+}
+
+/// A C-style string parameter.
+///
+/// The Rust version of the [`charp`] parameter. This type is meant to be
+/// used by the [`macros::module`] macro, not handled directly. Instead use the
+/// `read` method generated by that macro.
+///
+/// [`charp`]: ../../../include/linux/moduleparam.h
+pub enum StringParam {
+ /// A borrowed parameter value.
+ ///
+ /// Either the default value (which is static in the module) or borrowed
+ /// from the original argument buffer used to set the value.
+ Ref(&'static [u8]),
+
+ /// A value that was allocated when the parameter was set.
+ ///
+ /// The value needs to be freed when the parameter is reset or the module is
+ /// unloaded.
+ Owned(alloc::vec::Vec<u8>),
+}
+
+impl StringParam {
+ fn bytes(&self) -> &[u8] {
+ match self {
+ #[allow(clippy::explicit_auto_deref)]
+ StringParam::Ref(bytes) => *bytes,
+ StringParam::Owned(vec) => &vec[..],
+ }
+ }
+}
+
+impl core::fmt::Display for StringParam {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ let bytes = self.bytes();
+ match core::str::from_utf8(bytes) {
+ Ok(utf8) => write!(f, "{}", utf8),
+ Err(_) => write!(f, "{:?}", bytes),
+ }
+ }
+}
+
+impl ModuleParam for StringParam {
+ type Value = [u8];
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ // SAFETY: It is always safe to call [`slab_is_available`](../../../include/linux/slab.h).
+ let slab_available = unsafe { crate::bindings::slab_is_available() };
+ arg.and_then(|arg| {
+ if slab_available {
+ let mut vec = alloc::vec::Vec::new();
+ vec.try_extend_from_slice(arg).ok()?;
+ Some(StringParam::Owned(vec))
+ } else {
+ Some(StringParam::Ref(arg))
+ }
+ })
+ }
+
+ fn value(&self) -> &Self::Value {
+ self.bytes()
+ }
+}
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`StringParam`].
+ PARAM_OPS_STR,
+ StringParam
+);
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
new file mode 100644
index 000000000000..a27621b57fbb
--- /dev/null
+++ b/rust/kernel/of.rs
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Devicetree and Open Firmware abstractions.
+//!
+//! C header: [`include/linux/of_*.h`](../../../../include/linux/of_*.h)
+
+// Note: Most OF functions turn into inline dummies with CONFIG_OF(_*) disabled.
+// We have to either add config conditionals to helpers.c or here; let's do it
+// here for now. In the future, once bindgen can auto-generate static inline
+// helpers, this can go away if desired.
+
+use core::marker::PhantomData;
+use core::num::NonZeroU32;
+
+use crate::{
+ bindings, driver,
+ prelude::*,
+ str::{BStr, CStr},
+};
+
+/// An open firmware device id.
+#[derive(Clone, Copy)]
+pub enum DeviceId {
+ /// An open firmware device id where only a compatible string is specified.
+ Compatible(&'static BStr),
+}
+
+/// Defines a const open firmware device id table that also carries per-entry data/context/info.
+///
+/// # Example
+///
+/// ```
+/// # use kernel::{define_of_id_table, module_of_id_table, driver_of_id_table};
+/// use kernel::of;
+///
+/// define_of_id_table! {MY_ID_TABLE, u32, [
+/// (of::DeviceId::Compatible(b"test-device1,test-device2"), Some(0xff)),
+/// (of::DeviceId::Compatible(b"test-device3"), None),
+/// ]};
+///
+/// module_of_id_table!(MOD_TABLE, MY_ID_TABLE);
+///
+/// // Within the `Driver` implementation:
+/// driver_of_id_table!(MY_ID_TABLE);
+/// ```
+#[macro_export]
+macro_rules! define_of_id_table {
+ ($name:ident, $data_type:ty, $($t:tt)*) => {
+ $crate::define_id_array!($name, $crate::of::DeviceId, $data_type, $($t)*);
+ };
+}
+
+/// Convenience macro to declare which device ID table to use for a bus driver.
+#[macro_export]
+macro_rules! driver_of_id_table {
+ ($name:expr) => {
+ $crate::driver_id_table!(
+ OF_DEVICE_ID_TABLE,
+ $crate::of::DeviceId,
+ Self::IdInfo,
+ $name
+ );
+ };
+}
+
+/// Declare a device ID table as a module-level table. This creates the necessary module alias
+/// entries to enable module autoloading.
+#[macro_export]
+macro_rules! module_of_id_table {
+ ($item_name:ident, $table_name:ident) => {
+ $crate::module_id_table!($item_name, "of", $crate::of::DeviceId, $table_name);
+ };
+}
+
+// SAFETY: `ZERO` is all zeroed-out and `to_rawid` stores `offset` in `of_device_id::data`.
+unsafe impl const driver::RawDeviceId for DeviceId {
+ type RawType = bindings::of_device_id;
+ const ZERO: Self::RawType = bindings::of_device_id {
+ name: [0; 32],
+ type_: [0; 32],
+ compatible: [0; 128],
+ data: core::ptr::null(),
+ };
+
+ fn to_rawid(&self, offset: isize) -> Self::RawType {
+ let DeviceId::Compatible(compatible) = self;
+ let mut id = Self::ZERO;
+ let mut i = 0;
+ while i < compatible.len() {
+            // If `compatible` does not fit in `id.compatible`, an "index out of bounds"
+            // build-time error will be triggered.
+ id.compatible[i] = compatible[i] as _;
+ i += 1;
+ }
+ id.compatible[i] = b'\0' as _;
+ id.data = offset as _;
+ id
+ }
+}
+
+/// Type alias for an OF phandle
+pub type PHandle = bindings::phandle;
+
+/// An OF device tree node.
+///
+/// # Invariants
+///
+/// `raw_node` points to a valid OF node, and we hold a reference to it.
+pub struct Node {
+ raw_node: *mut bindings::device_node,
+}
+
+#[allow(dead_code)]
+impl Node {
+ /// Creates a `Node` from a raw C pointer. The pointer must be owned (the caller
+ /// gives up its reference). If the pointer is NULL, returns None.
+ pub(crate) unsafe fn from_raw(raw_node: *mut bindings::device_node) -> Option<Node> {
+ if raw_node.is_null() {
+ None
+ } else {
+ // INVARIANT: `raw_node` is valid per the above contract, and non-null per the
+ // above check.
+ Some(Node { raw_node })
+ }
+ }
+
+ /// Creates a `Node` from a raw C pointer. The pointer must be borrowed (the caller
+ /// retains its reference, which must be valid for the duration of the call). If the
+ /// pointer is NULL, returns None.
+ pub(crate) unsafe fn get_from_raw(raw_node: *mut bindings::device_node) -> Option<Node> {
+ // SAFETY: `raw_node` is valid or NULL per the above contract. `of_node_get` can handle
+ // NULL.
+ unsafe {
+ #[cfg(CONFIG_OF_DYNAMIC)]
+ bindings::of_node_get(raw_node);
+ Node::from_raw(raw_node)
+ }
+ }
+
+ /// Returns a reference to the underlying C `device_node` structure.
+ fn node(&self) -> &bindings::device_node {
+ // SAFETY: `raw_node` is valid per the type invariant.
+ unsafe { &*self.raw_node }
+ }
+
+ /// Returns the name of the node.
+ pub fn name(&self) -> &CStr {
+ // SAFETY: The lifetime of the `CStr` is the same as the lifetime of this `Node`.
+ unsafe { CStr::from_char_ptr(self.node().name) }
+ }
+
+ /// Returns the phandle for this node.
+ pub fn phandle(&self) -> PHandle {
+ self.node().phandle
+ }
+
+ /// Returns the full name (with address) for this node.
+ pub fn full_name(&self) -> &CStr {
+ // SAFETY: The lifetime of the `CStr` is the same as the lifetime of this `Node`.
+ unsafe { CStr::from_char_ptr(self.node().full_name) }
+ }
+
+ /// Returns `true` if the node is the root node.
+ pub fn is_root(&self) -> bool {
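+        // SAFETY: `raw_node` is valid per the type invariant.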
+ unsafe { bindings::of_node_is_root(self.raw_node) }
+ }
+
+ /// Returns the parent node, if any.
+ pub fn parent(&self) -> Option<Node> {
+ #[cfg(not(CONFIG_OF))]
+ {
+ None
+ }
+ #[cfg(CONFIG_OF)]
+ // SAFETY: `raw_node` is valid per the type invariant, and `of_get_parent()` takes a
+ // new reference to the parent (or returns NULL).
+ unsafe {
+ Node::from_raw(bindings::of_get_parent(self.raw_node))
+ }
+ }
+
+ /// Returns an iterator over the node's children.
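+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch, assuming `node` is a valid `Node`:
+    ///
+    /// ```ignore
+    /// for child in node.children() {
+    ///     pr_info!("child: {}\n", child.name());
+    /// }
+    /// ```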
+ // TODO: use type alias for return type once type_alias_impl_trait is stable
+ pub fn children(
+ &self,
+ ) -> NodeIterator<'_, impl Fn(*mut bindings::device_node) -> *mut bindings::device_node + '_>
+ {
+ #[cfg(not(CONFIG_OF))]
+ {
+ NodeIterator::new(|_prev| core::ptr::null_mut())
+ }
+ #[cfg(CONFIG_OF)]
+ // SAFETY: `raw_node` is valid per the type invariant, and the lifetime of the `NodeIterator`
+ // does not exceed the lifetime of the `Node` so it can borrow its reference.
+ NodeIterator::new(|prev| unsafe { bindings::of_get_next_child(self.raw_node, prev) })
+ }
+
+ /// Find a child by its name and return it, or None if not found.
+ #[allow(unused_variables)]
+ pub fn get_child_by_name(&self, name: &CStr) -> Option<Node> {
+ #[cfg(not(CONFIG_OF))]
+ {
+ None
+ }
+ #[cfg(CONFIG_OF)]
+ // SAFETY: `raw_node` is valid per the type invariant.
+ unsafe {
+ Node::from_raw(bindings::of_get_child_by_name(
+ self.raw_node,
+ name.as_char_ptr(),
+ ))
+ }
+ }
+
+ /// Checks whether the node is compatible with the given compatible string.
+ ///
+    /// Returns `None` if there is no match, or `Some(NonZeroU32)` if there is, with the value
+    /// representing the match score (higher values for more specific compatible matches).
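+    ///
+    /// # Examples
+    ///
+    /// An illustrative check against a hypothetical compatible string:
+    ///
+    /// ```ignore
+    /// if node.is_compatible(c_str!("vendor,device")).is_some() {
+    ///     // The node matches.
+    /// }
+    /// ```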
+ #[allow(unused_variables)]
+ pub fn is_compatible(&self, compatible: &CStr) -> Option<NonZeroU32> {
+ #[cfg(not(CONFIG_OF))]
+ let ret = 0;
+ #[cfg(CONFIG_OF)]
+ // SAFETY: `raw_node` is valid per the type invariant.
+ let ret =
+ unsafe { bindings::of_device_is_compatible(self.raw_node, compatible.as_char_ptr()) };
+
+ NonZeroU32::new(ret.try_into().ok()?)
+ }
+
+ /// Parse a phandle property and return the Node referenced at a given index, if any.
+ ///
+ /// Used only for phandle properties with no arguments.
+ #[allow(unused_variables)]
+ pub fn parse_phandle(&self, name: &CStr, index: usize) -> Option<Node> {
+ #[cfg(not(CONFIG_OF))]
+ {
+ None
+ }
+ #[cfg(CONFIG_OF)]
+ // SAFETY: `raw_node` is valid per the type invariant. `of_parse_phandle` returns an
+ // owned reference.
+ unsafe {
+ Node::from_raw(bindings::of_parse_phandle(
+ self.raw_node,
+ name.as_char_ptr(),
+ index.try_into().ok()?,
+ ))
+ }
+ }
+
+ #[allow(unused_variables)]
+ /// Look up a node property by name, returning a `Property` object if found.
+ pub fn find_property(&self, propname: &CStr) -> Option<Property<'_>> {
+ #[cfg(not(CONFIG_OF))]
+ {
+ None
+ }
+ #[cfg(CONFIG_OF)]
+ // SAFETY: `raw_node` is valid per the type invariant. The property structure
+ // returned borrows the reference to the owning node, and so has the same
+ // lifetime.
+ unsafe {
+ Property::from_raw(bindings::of_find_property(
+ self.raw_node,
+ propname.as_char_ptr(),
+ core::ptr::null_mut(),
+ ))
+ }
+ }
+
+ /// Look up a mandatory node property by name, and decode it into a value type.
+ ///
+ /// Returns `Err(ENOENT)` if the property is not found.
+ ///
+ /// The type `T` must implement `TryFrom<Property<'_>>`.
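+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch decoding a hypothetical `reg-width` property as a
+    /// big-endian `u32`:
+    ///
+    /// ```ignore
+    /// let width: u32 = node.get_property(c_str!("reg-width"))?;
+    /// ```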
+ pub fn get_property<'a, T: TryFrom<Property<'a>>>(&'a self, propname: &CStr) -> Result<T>
+ where
+ crate::error::Error: From<<T as TryFrom<Property<'a>>>::Error>,
+ {
+ Ok(self.find_property(propname).ok_or(ENOENT)?.try_into()?)
+ }
+
+ /// Look up an optional node property by name, and decode it into a value type.
+ ///
+ /// Returns `Ok(None)` if the property is not found.
+ ///
+ /// The type `T` must implement `TryFrom<Property<'_>>`.
+ pub fn get_opt_property<'a, T: TryFrom<Property<'a>>>(
+ &'a self,
+ propname: &CStr,
+ ) -> Result<Option<T>>
+ where
+ crate::error::Error: From<<T as TryFrom<Property<'a>>>::Error>,
+ {
+ self.find_property(propname)
+ .map_or(Ok(None), |p| Ok(Some(p.try_into()?)))
+ }
+}
+
+/// A property attached to a device tree `Node`.
+///
+/// # Invariants
+///
+/// `raw` must be valid and point to a property that outlives the lifetime of this object.
+#[derive(Copy, Clone)]
+pub struct Property<'a> {
+ raw: *mut bindings::property,
+ _p: PhantomData<&'a Node>,
+}
+
+impl<'a> Property<'a> {
+ #[cfg(CONFIG_OF)]
+ /// Create a `Property` object from a raw C pointer. Returns `None` if NULL.
+ ///
+ /// The passed pointer must be valid and outlive the lifetime argument, or NULL.
+ unsafe fn from_raw(raw: *mut bindings::property) -> Option<Property<'a>> {
+ if raw.is_null() {
+ None
+ } else {
+ Some(Property {
+ raw,
+ _p: PhantomData,
+ })
+ }
+ }
+
+ /// Returns the name of the property as a `CStr`.
+ pub fn name(&self) -> &CStr {
+ // SAFETY: `raw` is valid per the type invariant, and the lifetime of the `CStr` does not
+ // outlive it.
+ unsafe { CStr::from_char_ptr((*self.raw).name) }
+ }
+
+    /// Returns the value of the property as a `&[u8]`.
+ pub fn value(&self) -> &[u8] {
+ // SAFETY: `raw` is valid per the type invariant, and the lifetime of the slice does not
+ // outlive it.
+ unsafe { core::slice::from_raw_parts((*self.raw).value as *const u8, self.len()) }
+ }
+
+ /// Returns the length of the property in bytes.
+ pub fn len(&self) -> usize {
+ // SAFETY: `raw` is valid per the type invariant.
+ unsafe { (*self.raw).length.try_into().unwrap() }
+ }
+
+ /// Returns true if the property is empty (zero-length), which typically represents boolean true.
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+/// A trait that represents a value decodable from a property with a fixed unit size.
+///
+/// This allows us to auto-derive property decode implementations for `Vec<T: PropertyUnit>`.
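+///
+/// # Examples
+///
+/// An illustrative sketch: with the integer implementations below, an 8-byte
+/// property decodes into two big-endian `u32` units (the property name is
+/// hypothetical):
+///
+/// ```ignore
+/// let cells: Vec<u32> = node.get_property(c_str!("some-cells"))?;
+/// ```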
+pub trait PropertyUnit: Sized {
+ /// The size in bytes of a single data unit.
+ const UNIT_SIZE: usize;
+
+ /// Decode this data unit from a byte slice. The passed slice will have a length of `UNIT_SIZE`.
+ fn from_bytes(data: &[u8]) -> Result<Self>;
+}
+
+// This doesn't work...
+// impl<'a, T: PropertyUnit> TryFrom<Property<'a>> for T {
+// type Error = Error;
+//
+// fn try_from(p: Property<'_>) -> core::result::Result<T, Self::Error> {
+// if p.value().len() != T::UNIT_SIZE {
+// Err(EINVAL)
+// } else {
+// Ok(T::from_bytes(p.value())?)
+// }
+// }
+// }
+
+impl<'a, T: PropertyUnit> TryFrom<Property<'a>> for Vec<T> {
+ type Error = Error;
+
+ fn try_from(p: Property<'_>) -> core::result::Result<Vec<T>, Self::Error> {
+ if p.len() % T::UNIT_SIZE != 0 {
+ return Err(EINVAL);
+ }
+
+ let mut v = Vec::new();
+ let val = p.value();
+ for off in (0..p.len()).step_by(T::UNIT_SIZE) {
+ v.try_push(T::from_bytes(&val[off..off + T::UNIT_SIZE])?)?;
+ }
+ Ok(v)
+ }
+}
+
+macro_rules! prop_int_type (
+ ($type:ty) => {
+ impl<'a> TryFrom<Property<'a>> for $type {
+ type Error = Error;
+
+ fn try_from(p: Property<'_>) -> core::result::Result<$type, Self::Error> {
+ Ok(<$type>::from_be_bytes(p.value().try_into().or(Err(EINVAL))?))
+ }
+ }
+
+ impl PropertyUnit for $type {
+ const UNIT_SIZE: usize = <$type>::BITS as usize / 8;
+
+ fn from_bytes(data: &[u8]) -> Result<Self> {
+ Ok(<$type>::from_be_bytes(data.try_into().or(Err(EINVAL))?))
+ }
+ }
+ }
+);
+
+prop_int_type!(u8);
+prop_int_type!(u16);
+prop_int_type!(u32);
+prop_int_type!(u64);
+prop_int_type!(i8);
+prop_int_type!(i16);
+prop_int_type!(i32);
+prop_int_type!(i64);
+
+/// An iterator across a collection of Node objects.
+///
+/// # Invariants
+///
+/// `cur` must be NULL or a valid, owned node reference. If NULL, it represents either the first
+/// or last position of the iterator.
+///
+/// If `done` is true, `cur` must be NULL.
+///
+/// `fn_next` must be a callback that iterates from one node to the next, and it must not capture
+/// values that exceed the lifetime of the iterator. It must return owned references and also
+/// take owned references.
+pub struct NodeIterator<'a, T>
+where
+ T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+ cur: *mut bindings::device_node,
+ done: bool,
+ fn_next: T,
+ _p: PhantomData<&'a T>,
+}
+
+impl<'a, T> NodeIterator<'a, T>
+where
+ T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+ fn new(next: T) -> NodeIterator<'a, T> {
+ // INVARIANT: `cur` is initialized to NULL to represent the initial state.
+ NodeIterator {
+ cur: core::ptr::null_mut(),
+ done: false,
+ fn_next: next,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<'a, T> Iterator for NodeIterator<'a, T>
+where
+ T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+ type Item = Node;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.done {
+ None
+ } else {
+ // INVARIANT: if the new `cur` is NULL, then the iterator has reached its end and we
+ // set `done` to `true`.
+ self.cur = (self.fn_next)(self.cur);
+ self.done = self.cur.is_null();
+ // SAFETY: `fn_next` must return an owned reference per the iterator contract.
+ // The iterator itself is considered to own this reference, so we take another one.
+ unsafe { Node::get_from_raw(self.cur) }
+ }
+ }
+}
+
+// Drop impl to ensure we drop the current node being iterated on, if any.
+impl<'a, T> Drop for NodeIterator<'a, T>
+where
+ T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+ fn drop(&mut self) {
+ // SAFETY: `cur` is valid or NULL, and `of_node_put()` can handle NULL.
+ #[cfg(CONFIG_OF_DYNAMIC)]
+ unsafe {
+ bindings::of_node_put(self.cur)
+ };
+ }
+}
+
+/// Returns the root node of the OF device tree (if any).
+pub fn root() -> Option<Node> {
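+    // SAFETY: `of_root` is NULL or a valid node pointer with kernel lifetime, and
+    // `get_from_raw` handles both cases.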
+ unsafe { Node::get_from_raw(bindings::of_root) }
+}
+
+/// Returns the /chosen node of the OF device tree (if any).
+pub fn chosen() -> Option<Node> {
+ unsafe { Node::get_from_raw(bindings::of_chosen) }
+}
+
+/// Returns the /aliases node of the OF device tree (if any).
+pub fn aliases() -> Option<Node> {
+ unsafe { Node::get_from_raw(bindings::of_aliases) }
+}
+
+/// Returns the system stdout node of the OF device tree (if any).
+pub fn stdout() -> Option<Node> {
+ unsafe { Node::get_from_raw(bindings::of_stdout) }
+}
+
+#[allow(unused_variables)]
+/// Looks up a node in the device tree by phandle.
+pub fn find_node_by_phandle(handle: PHandle) -> Option<Node> {
+ #[cfg(not(CONFIG_OF))]
+ {
+ None
+ }
+ #[cfg(CONFIG_OF)]
+ unsafe {
+ #[allow(dead_code)]
+ Node::from_raw(bindings::of_find_node_by_phandle(handle))
+ }
+}
+
+impl Clone for Node {
+ fn clone(&self) -> Node {
+ // SAFETY: `raw_node` is valid and non-NULL per the type invariant,
+ // so this can never return None.
+ unsafe { Node::get_from_raw(self.raw_node).unwrap() }
+ }
+}
+
+impl Drop for Node {
+ fn drop(&mut self) {
+ #[cfg(CONFIG_OF_DYNAMIC)]
+ // SAFETY: `raw_node` is valid per the type invariant.
+ unsafe {
+ bindings::of_node_put(self.raw_node)
+ };
+ }
+}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
new file mode 100644
index 000000000000..542865da17a1
--- /dev/null
+++ b/rust/kernel/platform.rs
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Platform devices and drivers.
+//!
+//! Also called `platdev`, `pdev`.
+//!
+//! C header: [`include/linux/platform_device.h`](../../../../include/linux/platform_device.h)
+
+use crate::{
+ bindings,
+ device::{self, RawDevice},
+ driver,
+ error::{code::*, from_kernel_result, to_result, Result},
+ io_mem::{IoMem, IoResource, Resource},
+ of,
+ str::CStr,
+ types::ForeignOwnable,
+ ThisModule,
+};
+
+/// A registration of a platform driver.
+pub type Registration<T> = driver::Registration<Adapter<T>>;
+
+/// An adapter for the registration of platform drivers.
+pub struct Adapter<T: Driver>(T);
+
+impl<T: Driver> driver::DriverOps for Adapter<T> {
+ type RegType = bindings::platform_driver;
+
+ unsafe fn register(
+ reg: *mut bindings::platform_driver,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: By the safety requirements of this function (defined in the trait definition),
+ // `reg` is non-null and valid.
+ let pdrv = unsafe { &mut *reg };
+
+ pdrv.driver.name = name.as_char_ptr();
+ pdrv.probe = Some(Self::probe_callback);
+ pdrv.remove = Some(Self::remove_callback);
+ if let Some(t) = T::OF_DEVICE_ID_TABLE {
+ pdrv.driver.of_match_table = t.as_ref();
+ }
+ // SAFETY:
+ // - `pdrv` lives at least until the call to `platform_driver_unregister()` returns.
+ // - `name` pointer has static lifetime.
+ // - `module.0` lives at least as long as the module.
+ // - `probe()` and `remove()` are static functions.
+ // - `of_match_table` is either a raw pointer with static lifetime,
+ // as guaranteed by the [`driver::IdTable`] type, or null.
+ to_result(unsafe { bindings::__platform_driver_register(reg, module.0) })
+ }
+
+ unsafe fn unregister(reg: *mut bindings::platform_driver) {
+ // SAFETY: By the safety requirements of this function (defined in the trait definition),
+ // `reg` was passed (and updated) by a previous successful call to
+ // `platform_driver_register`.
+ unsafe { bindings::platform_driver_unregister(reg) };
+ }
+}
+
+impl<T: Driver> Adapter<T> {
+ fn get_id_info(dev: &Device) -> Option<&'static T::IdInfo> {
+ let table = T::OF_DEVICE_ID_TABLE?;
+
+ // SAFETY: `table` has static lifetime, so it is valid for read. `dev` is guaranteed to be
+ // valid while it's alive, so is the raw device returned by it.
+ let id = unsafe { bindings::of_match_device(table.as_ref(), dev.raw_device()) };
+ if id.is_null() {
+ return None;
+ }
+
+ // SAFETY: `id` is a pointer within the static table, so it's always valid.
+ let offset = unsafe { (*id).data };
+ if offset.is_null() {
+ return None;
+ }
+
+ // SAFETY: The offset comes from a previous call to `offset_from` in `IdArray::new`, which
+ // guarantees that the resulting pointer is within the table.
+ let ptr = unsafe {
+ id.cast::<u8>()
+ .offset(offset as _)
+ .cast::<Option<T::IdInfo>>()
+ };
+
+ // SAFETY: The id table has a static lifetime, so `ptr` is guaranteed to be valid for read.
+ #[allow(clippy::needless_borrow)]
+ unsafe {
+ (&*ptr).as_ref()
+ }
+ }
+
+ extern "C" fn probe_callback(pdev: *mut bindings::platform_device) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `pdev` is valid by the contract with the C code. `dev` is alive only for the
+ // duration of this call, so it is guaranteed to remain alive for the lifetime of
+ // `pdev`.
+ let mut dev = unsafe { Device::from_ptr(pdev) };
+ let info = Self::get_id_info(&dev);
+ let data = T::probe(&mut dev, info)?;
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ unsafe { bindings::platform_set_drvdata(pdev, data.into_foreign() as _) };
+ Ok(0)
+ }
+ }
+
+ extern "C" fn remove_callback(pdev: *mut bindings::platform_device) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ let ptr = unsafe { bindings::platform_get_drvdata(pdev) };
+ // SAFETY:
+ // - we allocated this pointer using `T::Data::into_foreign`,
+ // so it is safe to turn back into a `T::Data`.
+ // - the allocation happened in `probe`, no-one freed the memory,
+            //   and `remove` is the canonical kernel location to free driver data, so it is OK
+ // to convert the pointer back to a Rust structure here.
+ let data = unsafe { T::Data::from_foreign(ptr) };
+ let ret = T::remove(&data);
+ <T::Data as driver::DeviceRemoval>::device_remove(&data);
+ ret?;
+ Ok(0)
+ }
+ }
+}
+
+/// A platform driver.
+pub trait Driver {
+ /// Data stored on device by driver.
+ ///
+ /// Corresponds to the data set or retrieved via the kernel's
+ /// `platform_{set,get}_drvdata()` functions.
+ ///
+    /// Require that `Data` implements `ForeignOwnable`. We guarantee to
+    /// never move the underlying wrapped data structure. This allows
+    /// driver writers to use pinned or self-referential data structures.
+ type Data: ForeignOwnable + Send + Sync + driver::DeviceRemoval = ();
+
+ /// The type holding information about each device id supported by the driver.
+ type IdInfo: 'static = ();
+
+ /// The table of device ids supported by the driver.
+ const OF_DEVICE_ID_TABLE: Option<driver::IdTable<'static, of::DeviceId, Self::IdInfo>> = None;
+
+ /// Platform driver probe.
+ ///
+ /// Called when a new platform device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Self::Data>;
+
+ /// Platform driver remove.
+ ///
+ /// Called when a platform device is removed.
+ /// Implementers should prepare the device for complete removal here.
+ fn remove(_data: &Self::Data) -> Result {
+ Ok(())
+ }
+}
+
+/// A platform device.
+///
+/// # Invariants
+///
+/// The field `ptr` is non-null and valid for the lifetime of the object.
+pub struct Device {
+ ptr: *mut bindings::platform_device,
+ used_resource: u64,
+}
+
+impl Device {
+ /// Creates a new device from the given pointer.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null and valid. It must remain valid for the lifetime of the returned
+ /// instance.
+ unsafe fn from_ptr(ptr: *mut bindings::platform_device) -> Self {
+ // INVARIANT: The safety requirements of the function ensure the lifetime invariant.
+ Self {
+ ptr,
+ used_resource: 0,
+ }
+ }
+
+    /// Returns the id of the platform device.
+ pub fn id(&self) -> i32 {
+ // SAFETY: By the type invariants, we know that `self.ptr` is non-null and valid.
+ unsafe { (*self.ptr).id }
+ }
+
+ /// Sets the DMA masks (normal and coherent) for a platform device.
+ pub fn set_dma_masks(&mut self, mask: u64) -> Result {
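+        // SAFETY: `self.ptr` is valid by the type invariant, so the embedded `dev` is valid.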
+ to_result(unsafe { bindings::dma_set_mask_and_coherent(&mut (*self.ptr).dev, mask) })
+ }
+
+    /// Gets a system resource of a platform device.
+ pub fn get_resource(&mut self, rtype: IoResource, num: usize) -> Result<Resource> {
+ // SAFETY: `self.ptr` is valid by the type invariant.
+ let res = unsafe { bindings::platform_get_resource(self.ptr, rtype as _, num as _) };
+ if res.is_null() {
+ return Err(EINVAL);
+ }
+
+ // Get the position of the found resource in the array.
+ // SAFETY:
+ // - `self.ptr` is valid by the type invariant.
+ // - `res` is a displaced pointer to one of the array's elements,
+ // and `resource` is its base pointer.
+ let index = unsafe { res.offset_from((*self.ptr).resource) } as usize;
+
+ // Make sure that the index does not exceed the 64-bit mask.
+ assert!(index < 64);
+
+ if self.used_resource >> index & 1 == 1 {
+ return Err(EBUSY);
+ }
+ self.used_resource |= 1 << index;
+
+ // SAFETY: The pointer `res` is returned from `bindings::platform_get_resource`
+ // above and checked if it is not a NULL.
+ unsafe { Resource::new((*res).start, (*res).end, (*res).flags) }.ok_or(EINVAL)
+ }
+
+ /// Ioremaps resources of a platform device.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that either (a) the resulting interface cannot be used to initiate DMA
+ /// operations, or (b) that DMA operations initiated via the returned interface use DMA handles
+ /// allocated through the `dma` module.
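+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch from a `probe` implementation; the region size is
+    /// hypothetical:
+    ///
+    /// ```ignore
+    /// // SAFETY: No DMA operations are initiated through this mapping.
+    /// let iomem = unsafe { dev.ioremap_resource::<0x1000>(0)? };
+    /// ```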
+ pub unsafe fn ioremap_resource<const SIZE: usize>(
+ &mut self,
+ index: usize,
+ ) -> Result<IoMem<SIZE>> {
+ let mask = self.used_resource;
+ let res = self.get_resource(IoResource::Mem, index)?;
+
+ // SAFETY: Valid by the safety contract.
+ let iomem = unsafe { IoMem::<SIZE>::try_new(res) };
+ // If remapping fails, the given resource won't be used, so restore the old mask.
+ if iomem.is_err() {
+ self.used_resource = mask;
+ }
+ iomem
+ }
+}
+
+// SAFETY: The device returned by `raw_device` is the raw platform device.
+unsafe impl device::RawDevice for Device {
+ fn raw_device(&self) -> *mut bindings::device {
+ // SAFETY: By the type invariants, we know that `self.ptr` is non-null and valid.
+ unsafe { &mut (*self.ptr).dev }
+ }
+}
+
+/// Declares a kernel module that exposes a single platform driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::{platform, define_of_id_table, module_platform_driver};
+/// #
+/// struct MyDriver;
+/// impl platform::Driver for MyDriver {
+/// // [...]
+/// # fn probe(_dev: &mut platform::Device, _id_info: Option<&Self::IdInfo>) -> Result {
+/// # Ok(())
+/// # }
+/// # define_of_id_table! {(), [
+/// # (of::DeviceId::Compatible(b"brcm,bcm2835-rng"), None),
+/// # ]}
+/// }
+///
+/// module_platform_driver! {
+/// type: MyDriver,
+/// name: "module_name",
+/// author: "Author name",
+/// license: "GPL",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_platform_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::platform::Adapter<T>, { $($f)* });
+ };
+}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index 7a90249ee9b9..f145a09603a5 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -11,15 +11,23 @@
//! use kernel::prelude::*;
//! ```
+#[doc(no_inline)]
pub use core::pin::Pin;
+#[doc(no_inline)]
pub use alloc::{boxed::Box, vec::Vec};
+#[doc(no_inline)]
pub use macros::{module, vtable};
pub use super::build_assert;
-pub use super::{dbg, pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
+// `super::std_vendor` is hidden, which makes the macro inline for some reason.
+#[doc(no_inline)]
+pub use super::dbg;
+pub use super::fmt;
+pub use super::{dev_alert, dev_crit, dev_dbg, dev_emerg, dev_err, dev_info, dev_notice, dev_warn};
+pub use super::{pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn};
pub use super::static_assert;
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
new file mode 100644
index 000000000000..1093c4d26026
--- /dev/null
+++ b/rust/kernel/revocable.rs
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Revocable objects.
+//!
+//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
+//! of a [`RevocableGuard`] ensures that objects remain valid.
+
+use crate::{bindings, sync::rcu};
+use core::{
+ cell::UnsafeCell,
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ops::Deref,
+ ptr::drop_in_place,
+ sync::atomic::{fence, AtomicBool, AtomicU32, Ordering},
+};
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = Revocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Same example as above, but explicitly using the rcu read side lock.
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+/// use kernel::sync::rcu;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = rcu::read_lock();
+/// let e = v.try_access_with_guard(&guard)?;
+/// Some(e.a + e.b)
+/// }
+///
+/// let v = Revocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+pub struct Revocable<T> {
+ is_available: AtomicBool,
+ data: MaybeUninit<UnsafeCell<T>>,
+}
+
+// SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
+// functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
+// this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for Revocable<T> {}
+
+// SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
+// from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
+// implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for Revocable<T> {}
+
+impl<T> Revocable<T> {
+ /// Creates a new revocable instance of the given data.
+ pub const fn new(data: T) -> Self {
+ Self {
+ is_available: AtomicBool::new(true),
+ data: MaybeUninit::new(UnsafeCell::new(data)),
+ }
+ }
+
+ /// Tries to access the \[revocable\] wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
+ /// because another CPU may be waiting to complete the revocation of this object.
+ pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
+ let guard = rcu::read_lock();
+ if self.is_available.load(Ordering::Relaxed) {
+ // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+ // valid because the RCU read side lock prevents it from being dropped.
+ Some(unsafe { RevocableGuard::new(self.data.assume_init_ref().get(), guard) })
+ } else {
+ None
+ }
+ }
+
+ /// Tries to access the \[revocable\] wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a shared reference to the object otherwise; the object is guaranteed to
+ /// remain accessible while the rcu read side guard is alive. In such cases, callers are not
+ /// allowed to sleep because another CPU may be waiting to complete the revocation of this
+ /// object.
+ pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
+ if self.is_available.load(Ordering::Relaxed) {
+ // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+ // valid because the RCU read side lock prevents it from being dropped.
+ Some(unsafe { &*self.data.assume_init_ref().get() })
+ } else {
+ None
+ }
+ }
+
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`]. If
+ /// there are concurrent users of the object (i.e., ones that called [`Revocable::try_access`]
+ /// beforehand and still haven't dropped the returned guard), this function waits for the
+ /// concurrent access to complete before dropping the wrapped object.
+ pub fn revoke(&self) {
+ if self
+ .is_available
+ .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok()
+ {
+ // SAFETY: Just an FFI call, there are no further requirements.
+ unsafe { bindings::synchronize_rcu() };
+
+ // SAFETY: We know `self.data` is valid because only one CPU can succeed the
+ // `compare_exchange` above that takes `is_available` from `true` to `false`.
+ unsafe { drop_in_place(self.data.assume_init_ref().get()) };
+ }
+ }
+}
+
+impl<T> Drop for Revocable<T> {
+ fn drop(&mut self) {
+ // Drop only if the data hasn't been revoked yet (in which case it has already been
+ // dropped).
+ if *self.is_available.get_mut() {
+ // SAFETY: We know `self.data` is valid because no other CPU has changed
+ // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
+ // holds the only reference (mutable) to `self` now.
+ unsafe { drop_in_place(self.data.assume_init_ref().get()) };
+ }
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// CPUs may not sleep while holding on to [`RevocableGuard`] because it's in atomic context
+/// holding the RCU read-side lock.
+///
+/// # Invariants
+///
+/// The RCU read-side lock is held while the guard is alive.
+pub struct RevocableGuard<'a, T> {
+ data_ref: *const T,
+ _rcu_guard: rcu::Guard,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<T> RevocableGuard<'_, T> {
+ fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self {
+ Self {
+ data_ref,
+ _rcu_guard: rcu_guard,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T> Deref for RevocableGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariants, we hold the rcu read-side lock, so the object is
+ // guaranteed to remain valid.
+ unsafe { &*self.data_ref }
+ }
+}
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`AsyncRevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// Unlike [`Revocable`], [`AsyncRevocable`] does not wait for concurrent users of the wrapped
+/// object to finish before [`AsyncRevocable::revoke`] completes -- thus the async qualifier. This
+/// has the advantage of not requiring RCU locks or waits of any kind.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::AsyncRevocable;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &AsyncRevocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = AsyncRevocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Example where revocation happens while there is a user:
+///
+/// ```
+/// # use kernel::revocable::AsyncRevocable;
+/// use core::sync::atomic::{AtomicBool, Ordering};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// static DROPPED: AtomicBool = AtomicBool::new(false);
+///
+/// impl Drop for Example {
+/// fn drop(&mut self) {
+/// DROPPED.store(true, Ordering::Relaxed);
+/// }
+/// }
+///
+/// fn add_two(v: &AsyncRevocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = AsyncRevocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+///
+/// let guard = v.try_access().unwrap();
+/// assert!(!v.is_revoked());
+/// assert!(!DROPPED.load(Ordering::Relaxed));
+/// v.revoke();
+/// assert!(!DROPPED.load(Ordering::Relaxed));
+/// assert!(v.is_revoked());
+/// assert!(v.try_access().is_none());
+/// assert_eq!(guard.a + guard.b, 30);
+/// drop(guard);
+/// assert!(DROPPED.load(Ordering::Relaxed));
+/// ```
+pub struct AsyncRevocable<T> {
+ usage_count: AtomicU32,
+ data: MaybeUninit<UnsafeCell<T>>,
+}
+
+// SAFETY: `AsyncRevocable` is `Send` if the wrapped object is also `Send`. This is because while
+// the functionality exposed by `AsyncRevocable` can be accessed from any thread/CPU, it is
+// possible that this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for AsyncRevocable<T> {}
+
+// SAFETY: `AsyncRevocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require
+// `Send` from the wrapped object as well because of `AsyncRevocable::revoke`, which can trigger
+// the `Drop` implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for AsyncRevocable<T> {}
+
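+// The usage count packs two things: the top bit is the `REVOKED` flag, and the
+// lower 31 bits form a saturating count of live guards.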
+const REVOKED: u32 = 0x80000000;
+const COUNT_MASK: u32 = !REVOKED;
+const SATURATED_COUNT: u32 = REVOKED - 1;
+
+impl<T> AsyncRevocable<T> {
+ /// Creates a new asynchronously revocable instance of the given data.
+ pub fn new(data: T) -> Self {
+ Self {
+ usage_count: AtomicU32::new(0),
+ data: MaybeUninit::new(UnsafeCell::new(data)),
+ }
+ }
+
+ /// Tries to access the \[revocable\] wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive.
+ pub fn try_access(&self) -> Option<AsyncRevocableGuard<'_, T>> {
+ loop {
+ let count = self.usage_count.load(Ordering::Relaxed);
+
+            // Fail the access attempt if the object has already been revoked.
+ if count & REVOKED != 0 {
+ return None;
+ }
+
+ // No need to increment if the count is saturated.
+ if count == SATURATED_COUNT
+ || self
+ .usage_count
+ .compare_exchange(count, count + 1, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok()
+ {
+ return Some(AsyncRevocableGuard { revocable: self });
+ }
+ }
+ }
+
+ /// Revokes access to the protected object.
+ ///
+ /// Returns `true` if access has been revoked, or `false` when the object has already been
+ /// revoked by a previous call to [`AsyncRevocable::revoke`].
+ ///
+    /// This call is non-blocking, that is, no new users of the revocable object will be allowed,
+    /// but existing users may continue to use it, and this thread does not wait for them to
+    /// finish. In that case, the object is dropped when the last user completes.
+ pub fn revoke(&self) -> bool {
+ // Set the `REVOKED` bit.
+ //
+ // The acquire barrier matches up with the release when decrementing the usage count.
+ let prev = self.usage_count.fetch_or(REVOKED, Ordering::Acquire);
+ if prev & REVOKED != 0 {
+ // Another thread already revoked this object.
+ return false;
+ }
+
+ if prev == 0 {
+ // SAFETY: This thread just revoked the object and the usage count is zero, so the
+ // object is valid and there will be no future users.
+ unsafe { drop_in_place(UnsafeCell::raw_get(self.data.as_ptr())) };
+ }
+
+ true
+ }
+
+ /// Returns whether access to the object has been revoked.
+ pub fn is_revoked(&self) -> bool {
+ self.usage_count.load(Ordering::Relaxed) & REVOKED != 0
+ }
+}
+
+impl<T> Drop for AsyncRevocable<T> {
+ fn drop(&mut self) {
+ let count = *self.usage_count.get_mut();
+ if count != REVOKED {
+ // The object hasn't been dropped yet, so we do it now.
+
+ // This matches with the release when decrementing the usage count.
+ fence(Ordering::Acquire);
+
+            // SAFETY: Since `count` does not indicate a count of 0 with the `REVOKED` bit set,
+            // the object has not been dropped yet and is still valid.
+ unsafe { drop_in_place(UnsafeCell::raw_get(self.data.as_ptr())) };
+ }
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// # Invariants
+///
+/// The owner owns an increment on the usage count (which may have saturated it), which keeps the
+/// revocable object alive.
+pub struct AsyncRevocableGuard<'a, T> {
+ revocable: &'a AsyncRevocable<T>,
+}
+
+impl<T> Deref for AsyncRevocableGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the caller owns an increment.
+ unsafe { &*self.revocable.data.assume_init_ref().get() }
+ }
+}
+
+impl<T> Drop for AsyncRevocableGuard<'_, T> {
+ fn drop(&mut self) {
+ loop {
+ let count = self.revocable.usage_count.load(Ordering::Relaxed);
+ let actual_count = count & COUNT_MASK;
+ if actual_count == SATURATED_COUNT {
+ // The count is saturated, so we won't decrement (nor do we drop the object).
+ return;
+ }
+
+ if actual_count == 0 {
+ // Trying to underflow the count.
+ panic!("actual_count is zero");
+ }
+
+ // On success, we use release ordering, which matches with the acquire in one of the
+ // places where we drop the object, namely: below, in `AsyncRevocable::revoke`, or in
+ // `AsyncRevocable::drop`.
+ if self
+ .revocable
+ .usage_count
+ .compare_exchange(count, count - 1, Ordering::Release, Ordering::Relaxed)
+ .is_ok()
+ {
+            if count == (1 | REVOKED) {
+ // `count` is now zero and it is revoked, so free it now.
+
+ // This matches with the release above (which may have happened in other
+ // threads concurrently).
+ fence(Ordering::Acquire);
+
+ // SAFETY: Since `count` was 1, the object is still alive.
+ unsafe { drop_in_place(UnsafeCell::raw_get(self.revocable.data.as_ptr())) };
+ }
+
+ return;
+ }
+ }
+ }
+}
diff --git a/rust/kernel/soc/apple/mod.rs b/rust/kernel/soc/apple/mod.rs
new file mode 100644
index 000000000000..dd69db63677d
--- /dev/null
+++ b/rust/kernel/soc/apple/mod.rs
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Apple SoC drivers
+
+#[cfg(CONFIG_APPLE_RTKIT = "y")]
+pub mod rtkit;
diff --git a/rust/kernel/soc/apple/rtkit.rs b/rust/kernel/soc/apple/rtkit.rs
new file mode 100644
index 000000000000..bde63cd00193
--- /dev/null
+++ b/rust/kernel/soc/apple/rtkit.rs
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Support for Apple RTKit coprocessors.
+//!
+//! C header: [`include/linux/soc/apple/rtkit.h`](../../../../include/linux/soc/apple/rtkit.h)
+
+use crate::{
+ bindings, device,
+ error::{code::*, from_kernel_err_ptr, to_result, Error, Result},
+ str::CStr,
+ types::{ForeignOwnable, ScopeGuard},
+};
+
+use alloc::boxed::Box;
+use core::marker::PhantomData;
+use core::ptr;
+use macros::vtable;
+
+/// Trait to represent allocatable buffers for the RTKit core.
+///
+/// Users must implement this trait for their own representation of those allocations.
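+///
+/// # Examples
+///
+/// A minimal sketch of an implementation; `MyAlloc` is a hypothetical type that owns a
+/// device-visible allocation and already knows its IOVA:
+///
+/// ```ignore
+/// struct MyAlloc {
+///     iova: usize,
+///     data: Vec<u8>,
+/// }
+///
+/// impl Buffer for MyAlloc {
+///     fn iova(&self) -> Result<usize> {
+///         Ok(self.iova)
+///     }
+///
+///     fn buf(&mut self) -> Result<&mut [u8]> {
+///         Ok(&mut self.data)
+///     }
+/// }
+/// ```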
+pub trait Buffer {
+    /// Returns the IOVA (I/O virtual address) of the buffer from RTKit's point of view, or an
+    /// error if unavailable.
+ fn iova(&self) -> Result<usize>;
+
+    /// Returns a mutable byte slice of the buffer contents, or an error if unavailable.
+ fn buf(&mut self) -> Result<&mut [u8]>;
+}
+
+/// Callback operations for an RTKit client.
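+///
+/// # Examples
+///
+/// A minimal sketch of a client; `MyData` and `MyBuffer` are hypothetical driver-specific
+/// types (the latter implementing [`Buffer`]):
+///
+/// ```ignore
+/// struct MyClient;
+///
+/// #[vtable]
+/// impl Operations for MyClient {
+///     type Data = Arc<MyData>;
+///     type Buffer = MyBuffer;
+///
+///     fn recv_message(_data: <Self::Data as ForeignOwnable>::Borrowed<'_>, _ep: u8, _msg: u64) {
+///         // Handle the message received on the given endpoint.
+///     }
+/// }
+/// ```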
+#[vtable]
+pub trait Operations {
+ /// Arbitrary user context type.
+ type Data: ForeignOwnable + Send + Sync;
+
+ /// Type representing an allocated buffer for RTKit.
+ type Buffer: Buffer;
+
+ /// Called when RTKit crashes.
+ fn crashed(_data: <Self::Data as ForeignOwnable>::Borrowed<'_>) {}
+
+    /// Called in non-IRQ context when a message was received on a non-system endpoint.
+ fn recv_message(
+ _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+ _endpoint: u8,
+ _message: u64,
+ ) {
+ }
+
+ /// Called in IRQ context when a message was received on a non-system endpoint.
+ ///
+ /// Must return `true` if the message is handled, or `false` to process it in
+ /// the handling thread.
+ fn recv_message_early(
+ _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+ _endpoint: u8,
+ _message: u64,
+ ) -> bool {
+ false
+ }
+
+ /// Allocate a buffer for use by RTKit.
+ fn shmem_alloc(
+ _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+ _size: usize,
+ ) -> Result<Self::Buffer> {
+ Err(EINVAL)
+ }
+
+ /// Map an existing buffer used by RTKit at a device-specified virtual address.
+ fn shmem_map(
+ _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+ _iova: usize,
+ _size: usize,
+ ) -> Result<Self::Buffer> {
+ Err(EINVAL)
+ }
+}
+
+/// Represents `struct apple_rtkit *`.
+///
+/// # Invariants
+///
+/// - `rtk` is a valid pointer to a `struct apple_rtkit` instance.
+/// - `data` is a valid pointer returned by a previous call to `T::Data::into_foreign()`.
+pub struct RtKit<T: Operations> {
+ rtk: *mut bindings::apple_rtkit,
+ data: *mut core::ffi::c_void,
+ _p: PhantomData<T>,
+}
+
+unsafe extern "C" fn crashed_callback<T: Operations>(cookie: *mut core::ffi::c_void) {
+ T::crashed(unsafe { T::Data::borrow(cookie) });
+}
+
+unsafe extern "C" fn recv_message_callback<T: Operations>(
+ cookie: *mut core::ffi::c_void,
+ endpoint: u8,
+ message: u64,
+) {
+ T::recv_message(unsafe { T::Data::borrow(cookie) }, endpoint, message);
+}
+
+unsafe extern "C" fn recv_message_early_callback<T: Operations>(
+ cookie: *mut core::ffi::c_void,
+ endpoint: u8,
+ message: u64,
+) -> bool {
+ T::recv_message_early(unsafe { T::Data::borrow(cookie) }, endpoint, message)
+}
+
+unsafe extern "C" fn shmem_setup_callback<T: Operations>(
+ cookie: *mut core::ffi::c_void,
+ bfr: *mut bindings::apple_rtkit_shmem,
+) -> core::ffi::c_int {
+ // SAFETY: `bfr` is a valid buffer
+ let bfr_mut = unsafe { &mut *bfr };
+
+ let buf = if bfr_mut.iova != 0 {
+ bfr_mut.is_mapped = true;
+ T::shmem_map(
+ // SAFETY: `cookie` came from a previous call to `into_foreign`.
+ unsafe { T::Data::borrow(cookie) },
+ bfr_mut.iova as usize,
+ bfr_mut.size,
+ )
+ } else {
+ bfr_mut.is_mapped = false;
+ // SAFETY: `cookie` came from a previous call to `into_foreign`.
+ T::shmem_alloc(unsafe { T::Data::borrow(cookie) }, bfr_mut.size)
+ };
+
+ let mut buf = match buf {
+ Err(e) => {
+ return e.to_kernel_errno();
+ }
+ Ok(buf) => buf,
+ };
+
+ let iova = match buf.iova() {
+ Err(e) => {
+ return e.to_kernel_errno();
+ }
+ Ok(iova) => iova,
+ };
+
+ let slice = match buf.buf() {
+ Err(e) => {
+ return e.to_kernel_errno();
+ }
+ Ok(slice) => slice,
+ };
+
+ if slice.len() < bfr_mut.size {
+ return ENOMEM.to_kernel_errno();
+ }
+
+ bfr_mut.iova = iova as u64;
+ bfr_mut.buffer = slice.as_mut_ptr() as *mut _;
+
+ // Now box the returned buffer type and stash it in the private pointer of the
+ // `apple_rtkit_shmem` struct for safekeeping.
+ match Box::try_new(buf) {
+ Err(e) => Error::from(e).to_kernel_errno(),
+ Ok(boxed) => {
+ bfr_mut.private = Box::into_raw(boxed) as *mut _;
+ 0
+ }
+ }
+}
+
+unsafe extern "C" fn shmem_destroy_callback<T: Operations>(
+ _cookie: *mut core::ffi::c_void,
+ bfr: *mut bindings::apple_rtkit_shmem,
+) {
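+    // SAFETY: `bfr` is a valid buffer.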
+ let bfr_mut = unsafe { &mut *bfr };
+ // SAFETY: Per shmem_setup_callback, this has to be a pointer to a Buffer if it is set.
+ if !bfr_mut.private.is_null() {
+ unsafe {
+ core::mem::drop(Box::from_raw(bfr_mut.private as *mut T::Buffer));
+ }
+ bfr_mut.private = core::ptr::null_mut();
+ }
+}
+
+impl<T: Operations> RtKit<T> {
+ const VTABLE: bindings::apple_rtkit_ops = bindings::apple_rtkit_ops {
+ crashed: Some(crashed_callback::<T>),
+ recv_message: Some(recv_message_callback::<T>),
+ recv_message_early: Some(recv_message_early_callback::<T>),
+ shmem_setup: if T::HAS_SHMEM_ALLOC || T::HAS_SHMEM_MAP {
+ Some(shmem_setup_callback::<T>)
+ } else {
+ None
+ },
+ shmem_destroy: if T::HAS_SHMEM_ALLOC || T::HAS_SHMEM_MAP {
+ Some(shmem_destroy_callback::<T>)
+ } else {
+ None
+ },
+ };
+
+ /// Creates a new RTKit client for a given device and optional mailbox name or index.
+ pub fn new(
+ dev: &dyn device::RawDevice,
+ mbox_name: Option<&'static CStr>,
+ mbox_idx: usize,
+ data: T::Data,
+ ) -> Result<Self> {
+ let ptr = data.into_foreign() as *mut _;
+ let guard = ScopeGuard::new(|| {
+ // SAFETY: `ptr` came from a previous call to `into_foreign`.
+ unsafe { T::Data::from_foreign(ptr) };
+ });
+ // SAFETY: This just calls the C init function.
+ let rtk = unsafe {
+ from_kernel_err_ptr(bindings::apple_rtkit_init(
+ dev.raw_device(),
+ ptr,
+ match mbox_name {
+ Some(s) => s.as_char_ptr(),
+ None => ptr::null(),
+ },
+ mbox_idx.try_into()?,
+ &Self::VTABLE,
+ ))
+ }?;
+
+ guard.dismiss();
+ // INVARIANT: `rtk` and `data` are valid here.
+ Ok(Self {
+ rtk,
+ data: ptr,
+ _p: PhantomData,
+ })
+ }
+
+ /// Boots (wakes up) the RTKit coprocessor.
+ pub fn boot(&mut self) -> Result {
+ // SAFETY: `rtk` is valid per the type invariant.
+ to_result(unsafe { bindings::apple_rtkit_boot(self.rtk) })
+ }
+
+ /// Starts a non-system endpoint.
+ pub fn start_endpoint(&mut self, endpoint: u8) -> Result {
+ // SAFETY: `rtk` is valid per the type invariant.
+ to_result(unsafe { bindings::apple_rtkit_start_ep(self.rtk, endpoint) })
+ }
+
+ /// Sends a message to a given endpoint.
+ pub fn send_message(&mut self, endpoint: u8, message: u64) -> Result {
+ // SAFETY: `rtk` is valid per the type invariant.
+ to_result(unsafe {
+ bindings::apple_rtkit_send_message(self.rtk, endpoint, message, ptr::null_mut(), false)
+ })
+ }
+}
+
+// SAFETY: `RtKit` operations require a mutable reference
+unsafe impl<T: Operations> Sync for RtKit<T> {}
+
+// SAFETY: `RtKit` operations require a mutable reference
+unsafe impl<T: Operations> Send for RtKit<T> {}
+
+impl<T: Operations> Drop for RtKit<T> {
+ fn drop(&mut self) {
+ // SAFETY: The pointer is valid by the type invariant.
+ unsafe { bindings::apple_rtkit_free(self.rtk) };
+
+ // Free context data.
+ //
+ // SAFETY: This matches the call to `into_foreign` from `new` in the success case.
+ unsafe { T::Data::from_foreign(self.data) };
+ }
+}
diff --git a/rust/kernel/soc/mod.rs b/rust/kernel/soc/mod.rs
new file mode 100644
index 000000000000..e3024042e74f
--- /dev/null
+++ b/rust/kernel/soc/mod.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! SoC drivers
+
+pub mod apple;
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
index b3e68b24a8c6..2e864354cac1 100644
--- a/rust/kernel/std_vendor.rs
+++ b/rust/kernel/std_vendor.rs
@@ -137,6 +137,7 @@
/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+#[allow(rustdoc::broken_intra_doc_links)]
#[macro_export]
macro_rules! dbg {
// NOTE: We cannot use `concat!` to make a static string as a format argument
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
new file mode 100644
index 000000000000..c86faf35999c
--- /dev/null
+++ b/rust/kernel/sync.rs
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Synchronisation primitives.
+//!
+//! This module contains the kernel APIs related to synchronisation that have been ported or
+//! wrapped for usage by Rust code in the kernel.
+
+mod arc;
+mod condvar;
+mod guard;
+mod mutex;
+pub mod rcu;
+mod revocable;
+pub mod smutex;
+
+use crate::{bindings, str::CStr};
+use core::{cell::UnsafeCell, mem::MaybeUninit, pin::Pin};
+
+pub use arc::{Arc, ArcBorrow, UniqueArc};
+pub use condvar::CondVar;
+pub use guard::{Guard, Lock, LockFactory, LockInfo, LockIniter, ReadLock, WriteLock};
+pub use mutex::{Mutex, RevocableMutex, RevocableMutexGuard};
+pub use revocable::{Revocable, RevocableGuard};
+
+/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
+#[repr(transparent)]
+pub struct LockClassKey(UnsafeCell<MaybeUninit<bindings::lock_class_key>>);
+
+// SAFETY: This is a wrapper around a lock class key, so it is safe to use references to it from
+// any thread.
+unsafe impl Sync for LockClassKey {}
+
+impl LockClassKey {
+ /// Creates a new lock class key.
+ pub const fn new() -> Self {
+ Self(UnsafeCell::new(MaybeUninit::uninit()))
+ }
+
+ #[allow(dead_code)]
+ pub(crate) fn get(&self) -> *mut bindings::lock_class_key {
+ self.0.get().cast()
+ }
+}
+
+/// Safely initialises an object that has an `init` function taking a name and a lock class as
+/// arguments, such as [`Mutex`] and [`CondVar`]. Each of them also provides a more specialised
+/// macro that wraps this one.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! init_with_lockdep {
+ ($obj:expr, $name:expr) => {{
+ static CLASS1: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ static CLASS2: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ let obj = $obj;
+ let name = $crate::c_str!($name);
+ $crate::sync::NeedsLockClass::init(obj, name, &CLASS1, &CLASS2)
+ }};
+}
+
+/// A trait for types that need a lock class during initialisation.
+///
+/// Implementers of this trait benefit from the [`init_with_lockdep`] macro that generates a new
+/// class for each initialisation call site.
+pub trait NeedsLockClass {
+ /// Initialises the type instance so that it can be safely used.
+ ///
+ /// Callers are encouraged to use the [`init_with_lockdep`] macro as it automatically creates a
+ /// new lock class on each usage.
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ );
+}
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
new file mode 100644
index 000000000000..a1cc6c0882c4
--- /dev/null
+++ b/rust/kernel/sync/arc.rs
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A reference-counted pointer.
+//!
+//! This module implements a way for users to create reference-counted objects and pointers to
+//! them. Such a pointer automatically increments and decrements the count, and drops the
+//! underlying object when it reaches zero. It is also safe to use concurrently from multiple
+//! threads.
+//!
+//! It is different from the standard library's [`Arc`] in a few ways:
+//! 1. It is backed by the kernel's `refcount_t` type.
+//! 2. It does not support weak references, which allows it to be half the size.
+//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
+//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
+//!
+//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
+
+use crate::{
+ bindings,
+ error::Result,
+ types::{ForeignOwnable, Opaque},
+};
+use alloc::boxed::Box;
+use core::{
+ any::Any,
+ fmt,
+ marker::{PhantomData, Unsize},
+ mem::{ManuallyDrop, MaybeUninit},
+ ops::{Deref, DerefMut},
+ pin::Pin,
+ ptr::NonNull,
+};
+
+/// A reference-counted pointer to an instance of `T`.
+///
+/// The reference count is incremented when new instances of [`Arc`] are created, and decremented
+/// when they are dropped. When the count reaches zero, the underlying `T` is also dropped.
+///
+/// # Invariants
+///
+/// The reference count on an instance of [`Arc`] is always non-zero.
+/// The object pointed to by [`Arc`] is always pinned.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::Arc;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// // Create a ref-counted instance of `Example`.
+/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+///
+/// // Get a new pointer to `obj` and increment the refcount.
+/// let cloned = obj.clone();
+///
+/// // Assert that both `obj` and `cloned` point to the same underlying object.
+/// assert!(core::ptr::eq(&*obj, &*cloned));
+///
+/// // Destroy `obj` and decrement its refcount.
+/// drop(obj);
+///
+/// // Check that the values are still accessible through `cloned`.
+/// assert_eq!(cloned.a, 10);
+/// assert_eq!(cloned.b, 20);
+///
+/// // The refcount drops to zero when `cloned` goes out of scope, and the memory is freed.
+/// ```
+///
+/// Using `Arc<T>` as the type of `self`:
+///
+/// ```
+/// use kernel::sync::Arc;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// impl Example {
+/// fn take_over(self: Arc<Self>) {
+/// // ...
+/// }
+///
+/// fn use_reference(self: &Arc<Self>) {
+/// // ...
+/// }
+/// }
+///
+/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// obj.use_reference();
+/// obj.take_over();
+/// ```
+///
+/// Coercion from `Arc<Example>` to `Arc<dyn MyTrait>`:
+///
+/// ```
+/// use kernel::sync::{Arc, ArcBorrow};
+///
+/// trait MyTrait {
+/// // Trait has a function whose `self` type is `Arc<Self>`.
+/// fn example1(self: Arc<Self>) {}
+///
+/// // Trait has a function whose `self` type is `ArcBorrow<'_, Self>`.
+/// fn example2(self: ArcBorrow<'_, Self>) {}
+/// }
+///
+/// struct Example;
+/// impl MyTrait for Example {}
+///
+/// // `obj` has type `Arc<Example>`.
+/// let obj: Arc<Example> = Arc::try_new(Example)?;
+///
+/// // `coerced` has type `Arc<dyn MyTrait>`.
+/// let coerced: Arc<dyn MyTrait> = obj;
+/// ```
+pub struct Arc<T: ?Sized> {
+ ptr: NonNull<ArcInner<T>>,
+ _p: PhantomData<ArcInner<T>>,
+}
+
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+ refcount: Opaque<bindings::refcount_t>,
+ data: T,
+}
+
+// This is to allow [`Arc`] (and variants) to be used as the type of `self`.
+impl<T: ?Sized> core::ops::Receiver for Arc<T> {}
+
+// This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {}
+
+// This is to allow `Arc<U>` to be dispatched on when `Arc<T>` can be coerced into `Arc<U>`.
+impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {}
+
+// SAFETY: It is safe to send `Arc<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has an `Arc<T>` may ultimately access `T` directly, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+
+// SAFETY: It is safe to send `&Arc<T>` to another thread when the underlying `T` is `Sync` for the
+// same reason as above. `T` needs to be `Send` as well because a thread can clone an `&Arc<T>`
+// into an `Arc<T>`, which may lead to `T` being accessed by the same reasoning as above.
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+impl<T> Arc<T> {
+ /// Constructs a new reference counted instance of `T`.
+ pub fn try_new(contents: T) -> Result<Self> {
+ // INVARIANT: The refcount is initialised to a non-zero value.
+ let value = ArcInner {
+ // SAFETY: There are no safety requirements for this FFI call.
+ refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+ data: contents,
+ };
+
+ let inner = Box::try_new(value)?;
+
+ // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
+ // `Arc` object.
+ Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Constructs a new [`Arc`] from an existing [`ArcInner`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `inner` points to a valid location and has a non-zero reference
+ /// count, one of which will be owned by the new [`Arc`] instance.
+ unsafe fn from_inner(inner: NonNull<ArcInner<T>>) -> Self {
+ // INVARIANT: By the safety requirements, the invariants hold.
+ Arc {
+ ptr: inner,
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns an [`ArcBorrow`] from the given [`Arc`].
+ ///
+ /// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method
+ /// receiver), but we have an [`Arc`] instead. Getting an [`ArcBorrow`] is free when optimised.
+ #[inline]
+ pub fn as_arc_borrow(&self) -> ArcBorrow<'_, T> {
+ // SAFETY: The constraint that the lifetime of the shared reference must outlive that of
+ // the returned `ArcBorrow` ensures that the object remains alive and that no mutable
+ // reference can be created.
+ unsafe { ArcBorrow::new(self.ptr) }
+ }
+}
+
+impl<T: 'static> ForeignOwnable for Arc<T> {
+ type Borrowed<'a> = ArcBorrow<'a, T>;
+
+ fn into_foreign(self) -> *const core::ffi::c_void {
+ ManuallyDrop::new(self).ptr.as_ptr() as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> ArcBorrow<'a, T> {
+ // SAFETY: By the safety requirement of this function, we know that `ptr` came from
+ // a previous call to `Arc::into_foreign`.
+ let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap();
+
+ // SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive
+ // for the lifetime of the returned value. Additionally, the safety requirements of
+ // `ForeignOwnable::borrow_mut` ensure that no new mutable references are created.
+ unsafe { ArcBorrow::new(inner) }
+ }
+
+ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: By the safety requirement of this function, we know that `ptr` came from
+ // a previous call to `Arc::into_foreign`, which guarantees that `ptr` is valid and
+ // holds a reference count increment that is transferrable to us.
+ unsafe { Self::from_inner(NonNull::new(ptr as _).unwrap()) }
+ }
+}
+
+impl Arc<dyn Any + Send + Sync> {
+ /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
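+    ///
+    /// # Examples
+    ///
+    /// A short sketch; `MyType` is a hypothetical type implementing `Any + Send + Sync`:
+    ///
+    /// ```ignore
+    /// let any: Arc<dyn Any + Send + Sync> = Arc::try_new(MyType)?;
+    /// let concrete: Arc<MyType> = any.downcast::<MyType>().ok().unwrap();
+    /// ```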
+ pub fn downcast<T>(self) -> core::result::Result<Arc<T>, Self>
+ where
+ T: Any + Send + Sync,
+ {
+ if (*self).is::<T>() {
+ // SAFETY: We have just checked that the type is correct, so we can cast the pointer.
+ unsafe {
+ let ptr = self.ptr.cast::<ArcInner<T>>();
+ core::mem::forget(self);
+ Ok(Arc::from_inner(ptr))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl<T: ?Sized> Deref for Arc<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to dereference it.
+ unsafe { &self.ptr.as_ref().data }
+ }
+}
+
+impl<T: ?Sized> Clone for Arc<T> {
+ fn clone(&self) -> Self {
+ // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to increment the refcount.
+ unsafe { bindings::refcount_inc(self.ptr.as_ref().refcount.get()) };
+
+ // SAFETY: We just incremented the refcount. This increment is now owned by the new `Arc`.
+ unsafe { Self::from_inner(self.ptr) }
+ }
+}
+
+impl<T: ?Sized> Drop for Arc<T> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object. We cannot
+ // touch `refcount` after it's decremented to a non-zero value because another thread/CPU
+ // may concurrently decrement it to zero and free it. It is ok to have a raw pointer to
+ // freed/invalid memory as long as it is never dereferenced.
+ let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
+
+ // INVARIANT: If the refcount reaches zero, there are no other instances of `Arc`, and
+ // this instance is being dropped, so the broken invariant is not observable.
+ // SAFETY: Also by the type invariant, we are allowed to decrement the refcount.
+ let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+ if is_zero {
+ // The count reached zero, we must free the memory.
+ //
+ // SAFETY: The pointer was initialised from the result of `Box::leak`.
+ unsafe { Box::from_raw(self.ptr.as_ptr()) };
+ }
+ }
+}
+
+impl<T: ?Sized> From<UniqueArc<T>> for Arc<T> {
+ fn from(item: UniqueArc<T>) -> Self {
+ item.inner
+ }
+}
+
+impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
+ fn from(item: Pin<UniqueArc<T>>) -> Self {
+ // SAFETY: The type invariants of `Arc` guarantee that the data is pinned.
+ unsafe { Pin::into_inner_unchecked(item).inner }
+ }
+}
+
+/// A borrowed reference to an [`Arc`] instance.
+///
+/// For cases when one doesn't ever need to increment the refcount on the allocation, it is simpler
+/// to use just `&T`, which we can trivially get from an `Arc<T>` instance.
+///
+/// However, when one may need to increment the refcount, it is preferable to use an `ArcBorrow<T>`
+/// over `&Arc<T>` because the latter results in a double indirection: a pointer (shared reference)
+/// to a pointer (`Arc<T>`) to the object (`T`). An [`ArcBorrow`] eliminates this double
+/// indirection while still allowing one to increment the refcount and get an `Arc<T>` when/if
+/// needed.
+///
+/// # Invariants
+///
+/// There are no mutable references to the underlying [`Arc`], and it remains valid for the
+/// lifetime of the [`ArcBorrow`] instance.
+///
+/// # Example
+///
+/// ```
+/// use kernel::sync::{Arc, ArcBorrow};
+///
+/// struct Example;
+///
+/// fn do_something(e: ArcBorrow<'_, Example>) -> Arc<Example> {
+/// e.into()
+/// }
+///
+/// let obj = Arc::try_new(Example)?;
+/// let cloned = do_something(obj.as_arc_borrow());
+///
+/// // Assert that both `obj` and `cloned` point to the same underlying object.
+/// assert!(core::ptr::eq(&*obj, &*cloned));
+/// ```
+///
+/// Using `ArcBorrow<T>` as the type of `self`:
+///
+/// ```
+/// use kernel::sync::{Arc, ArcBorrow};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// impl Example {
+/// fn use_reference(self: ArcBorrow<'_, Self>) {
+/// // ...
+/// }
+/// }
+///
+/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
+/// obj.as_arc_borrow().use_reference();
+/// ```
+pub struct ArcBorrow<'a, T: ?Sized + 'a> {
+ inner: NonNull<ArcInner<T>>,
+ _p: PhantomData<&'a ()>,
+}
+
+// This is to allow [`ArcBorrow`] (and variants) to be used as the type of `self`.
+impl<T: ?Sized> core::ops::Receiver for ArcBorrow<'_, T> {}
+
+// This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into
+// `ArcBorrow<U>`.
+impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>>
+ for ArcBorrow<'_, T>
+{
+}
+
+impl<T: ?Sized> Clone for ArcBorrow<'_, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T: ?Sized> Copy for ArcBorrow<'_, T> {}
+
+impl<T: ?Sized> ArcBorrow<'_, T> {
+ /// Creates a new [`ArcBorrow`] instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure the following for the lifetime of the returned [`ArcBorrow`] instance:
+ /// 1. That `inner` remains valid;
+ /// 2. That no mutable references to `inner` are created.
+ unsafe fn new(inner: NonNull<ArcInner<T>>) -> Self {
+ // INVARIANT: The safety requirements guarantee the invariants.
+ Self {
+ inner,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized> From<ArcBorrow<'_, T>> for Arc<T> {
+ fn from(b: ArcBorrow<'_, T>) -> Self {
+ // SAFETY: The existence of `b` guarantees that the refcount is non-zero. `ManuallyDrop`
+ // guarantees that `drop` isn't called, so it's ok that the temporary `Arc` doesn't own the
+ // increment.
+ ManuallyDrop::new(unsafe { Arc::from_inner(b.inner) })
+ .deref()
+ .clone()
+ }
+}
+
+impl<T: ?Sized> Deref for ArcBorrow<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant, the underlying object is still alive with no mutable
+ // references to it, so it is safe to create a shared reference.
+ unsafe { &self.inner.as_ref().data }
+ }
+}
+
+/// A refcounted object that is known to have a refcount of 1.
+///
+/// It is mutable and can be converted to an [`Arc`] so that it can be shared.
+///
+/// # Invariants
+///
+/// `inner` always has a reference count of 1.
+///
+/// # Examples
+///
+/// In the following example, we make changes to the inner object before turning it into an
+/// `Arc<Test>` object (after which point, it cannot be mutated directly). Note that `x.into()`
+/// cannot fail.
+///
+/// ```
+/// use kernel::sync::{Arc, UniqueArc};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn test() -> Result<Arc<Example>> {
+/// let mut x = UniqueArc::try_new(Example { a: 10, b: 20 })?;
+/// x.a += 1;
+/// x.b += 1;
+/// Ok(x.into())
+/// }
+///
+/// # test().unwrap();
+/// ```
+///
+/// In the following example we first allocate memory for a ref-counted `Example` but we don't
+/// initialise it on allocation. We do initialise it later with a call to [`UniqueArc::write`],
+/// followed by a conversion to `Arc<Example>`. This is particularly useful when allocation happens
+/// in one context (e.g., sleepable) and initialisation in another (e.g., atomic):
+///
+/// ```
+/// use kernel::sync::{Arc, UniqueArc};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn test() -> Result<Arc<Example>> {
+/// let x = UniqueArc::try_new_uninit()?;
+/// Ok(x.write(Example { a: 10, b: 20 }).into())
+/// }
+///
+/// # test().unwrap();
+/// ```
+///
+/// In the last example below, the caller gets a pinned instance of `Example` while converting to
+/// `Arc<Example>`; this is useful in scenarios where one needs a pinned reference during
+/// initialisation, for example, when initialising fields that are wrapped in locks.
+///
+/// ```
+/// use core::pin::Pin;
+/// use kernel::sync::{Arc, UniqueArc};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn test() -> Result<Arc<Example>> {
+/// let mut pinned = Pin::from(UniqueArc::try_new(Example { a: 10, b: 20 })?);
+/// // We can modify `pinned` because it is `Unpin`.
+/// pinned.as_mut().a += 1;
+/// Ok(pinned.into())
+/// }
+///
+/// # test().unwrap();
+/// ```
+pub struct UniqueArc<T: ?Sized> {
+ inner: Arc<T>,
+}
+
+impl<T> UniqueArc<T> {
+ /// Tries to allocate a new [`UniqueArc`] instance.
+ pub fn try_new(value: T) -> Result<Self> {
+ Ok(Self {
+ // INVARIANT: The newly-created object has a ref-count of 1.
+ inner: Arc::try_new(value)?,
+ })
+ }
+
+ /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
+ pub fn try_new_uninit() -> Result<UniqueArc<MaybeUninit<T>>> {
+ Ok(UniqueArc::<MaybeUninit<T>> {
+ // INVARIANT: The newly-created object has a ref-count of 1.
+ inner: Arc::try_new(MaybeUninit::uninit())?,
+ })
+ }
+}
+
+impl<T> UniqueArc<MaybeUninit<T>> {
+ /// Converts a `UniqueArc<MaybeUninit<T>>` into a `UniqueArc<T>` by writing a value into it.
+ pub fn write(mut self, value: T) -> UniqueArc<T> {
+ self.deref_mut().write(value);
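+        // SAFETY: The contents were just initialised by the call to `write` above.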
+ unsafe { self.assume_init() }
+ }
+
+    /// Returns a `UniqueArc<T>`, assuming the `MaybeUninit<T>` has already been initialised.
+    ///
+    /// # Safety
+    ///
+    /// The contents of the `UniqueArc` must have already been fully initialised.
+ pub unsafe fn assume_init(self) -> UniqueArc<T> {
+ let inner = ManuallyDrop::new(self).inner.ptr;
+ UniqueArc {
+ // SAFETY: The new `Arc` is taking over `ptr` from `self.inner` (which won't be
+ // dropped). The types are compatible because `MaybeUninit<T>` is compatible with `T`.
+ inner: unsafe { Arc::from_inner(inner.cast()) },
+ }
+ }
+}
+
+impl<T: ?Sized> From<UniqueArc<T>> for Pin<UniqueArc<T>> {
+ fn from(obj: UniqueArc<T>) -> Self {
+ // SAFETY: It is not possible to move/replace `T` inside a `Pin<UniqueArc<T>>` (unless `T`
+ // is `Unpin`), so it is ok to convert it to `Pin<UniqueArc<T>>`.
+ unsafe { Pin::new_unchecked(obj) }
+ }
+}
+
+impl<T: ?Sized> Deref for UniqueArc<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.inner.deref()
+ }
+}
+
+impl<T: ?Sized> DerefMut for UniqueArc<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: By the `Arc` type invariant, there is necessarily a reference to the object, so
+ // it is safe to dereference it. Additionally, we know there is only one reference when
+ // it's inside a `UniqueArc`, so it is safe to get a mutable reference.
+ unsafe { &mut self.inner.ptr.as_mut().data }
+ }
+}
+
+impl<T: fmt::Display + ?Sized> fmt::Display for UniqueArc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Display + ?Sized> fmt::Display for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for UniqueArc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.deref(), f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized> fmt::Debug for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.deref(), f)
+ }
+}
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
new file mode 100644
index 000000000000..4610f0bda650
--- /dev/null
+++ b/rust/kernel/sync/condvar.rs
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A condition variable.
+//!
+//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition
+//! variable.
+
+use super::{Guard, Lock, LockClassKey, LockInfo, NeedsLockClass};
+use crate::{bindings, str::CStr, types::Opaque};
+use core::{marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`CondVar`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! condvar_init {
+ ($condvar:expr, $name:literal) => {
+ $crate::init_with_lockdep!($condvar, $name)
+ };
+}
+
+// TODO: `bindgen` is not generating this constant. Figure out why.
+const POLLFREE: u32 = 0x4000;
+
+/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller
+/// to atomically release the given lock and go to sleep, reacquiring the lock when it wakes up.
+/// The thread wakes up when notified by another thread (via [`CondVar::notify_one`] or
+/// [`CondVar::notify_all`]) or when it receives a signal.
+///
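+/// # Examples
+///
+/// A sketch of a waiter and a notifier; it assumes that `mutex` and `cv` were previously
+/// initialised with `mutex_init!` and `condvar_init!` respectively:
+///
+/// ```ignore
+/// fn wait_for_nonzero(mutex: &Mutex<u32>, cv: &CondVar) -> u32 {
+///     let mut guard = mutex.lock();
+///     while *guard == 0 {
+///         // Atomically releases the mutex, sleeps, then reacquires it on wake-up.
+///         let _signal_pending = cv.wait(&mut guard);
+///     }
+///     *guard
+/// }
+///
+/// fn publish(mutex: &Mutex<u32>, cv: &CondVar) {
+///     *mutex.lock() = 42;
+///     cv.notify_all();
+/// }
+/// ```
+///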
+/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
+pub struct CondVar {
+ pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+
+ /// A condvar needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for CondVar {}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
+// concurrently.
+unsafe impl Sync for CondVar {}
+
+impl CondVar {
+    /// Constructs a new condition variable.
+ ///
+ /// # Safety
+ ///
+    /// The caller must call `CondVar::init` before using the condition variable.
+ pub const unsafe fn new() -> Self {
+ Self {
+ wait_list: Opaque::uninit(),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or
+ /// [`CondVar::notify_all`], or when the thread receives a signal.
+ ///
+ /// Returns whether there is a signal pending.
+ #[must_use = "wait returns if a signal is pending, so the caller must check the return value"]
+ pub fn wait<L: Lock<I>, I: LockInfo>(&self, guard: &mut Guard<'_, L, I>) -> bool {
+ let lock = guard.lock;
+ let wait = Opaque::<bindings::wait_queue_entry>::uninit();
+
+ // SAFETY: `wait` points to valid memory.
+ unsafe { bindings::init_wait(wait.get()) };
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe {
+ bindings::prepare_to_wait_exclusive(
+ self.wait_list.get(),
+ wait.get(),
+ bindings::TASK_INTERRUPTIBLE as _,
+ )
+ };
+
+ // SAFETY: The guard is evidence that the caller owns the lock.
+ unsafe { lock.unlock(&mut guard.context) };
+
+ // SAFETY: No arguments, switches to another thread.
+ unsafe { bindings::schedule() };
+
+ guard.context = lock.lock_noguard();
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+
+        // TODO: Replace with `Task::current().signal_pending()` once `kernel::task` lands
+        // upstream.
+        //
+        // SAFETY: `get_current()` returns a valid pointer to the current task.
+        unsafe { bindings::signal_pending(bindings::get_current()) != 0 }
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads with the given flags.
+ fn notify(&self, count: i32, flags: u32) {
+ // SAFETY: `wait_list` points to valid memory.
+ unsafe {
+ bindings::__wake_up(
+ self.wait_list.get(),
+ bindings::TASK_NORMAL,
+ count,
+ flags as _,
+ )
+ };
+ }
+
+ /// Wakes a single waiter up, if any. This is not 'sticky' in the sense that if no thread is
+ /// waiting, the notification is lost completely (as opposed to automatically waking up the
+ /// next waiter).
+ pub fn notify_one(&self) {
+ self.notify(1, 0);
+ }
+
+ /// Wakes all waiters up, if any. This is not 'sticky' in the sense that if no thread is
+ /// waiting, the notification is lost completely (as opposed to automatically waking up the
+ /// next waiter).
+ pub fn notify_all(&self) {
+ self.notify(0, 0);
+ }
+
+ /// Wakes all waiters up. If they were added by `epoll`, they are also removed from the list of
+ /// waiters. This is useful when cleaning up a condition variable that may be waited on by
+ /// threads that use `epoll`.
+ pub fn free_waiters(&self) {
+ self.notify(1, bindings::POLLHUP | POLLFREE);
+ }
+}
+
+impl NeedsLockClass for CondVar {
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key: &'static LockClassKey,
+ _: &'static LockClassKey,
+ ) {
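+        // SAFETY: `wait_list` points to valid memory; `name` and `key` are valid because they
+        // have `'static` lifetimes.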
+ unsafe {
+ bindings::__init_waitqueue_head(self.wait_list.get(), name.as_char_ptr(), key.get())
+ };
+ }
+}
diff --git a/rust/kernel/sync/guard.rs b/rust/kernel/sync/guard.rs
new file mode 100644
index 000000000000..1546e2c7dfeb
--- /dev/null
+++ b/rust/kernel/sync/guard.rs
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A generic lock guard and trait.
+//!
+//! This module contains a lock guard that can be used with any locking primitive that implements
+//! the ([`Lock`]) trait. It also contains the definition of the trait, which can be leveraged by
+//! other constructs to work on generic locking primitives.
+
+use super::{LockClassKey, NeedsLockClass};
+use crate::{
+ str::CStr,
+ types::{Bool, False, True},
+};
+use core::pin::Pin;
+
+/// Allows mutual exclusion primitives that implement the [`Lock`] trait to automatically unlock
+/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
+/// protected by the lock.
+#[must_use = "the lock unlocks immediately when the guard is unused"]
+pub struct Guard<'a, L: Lock<I> + ?Sized, I: LockInfo = WriteLock> {
+ pub(crate) lock: &'a L,
+ pub(crate) context: L::GuardContext,
+}
+
+// SAFETY: `Guard` is sync when the data protected by the lock is also sync. This is more
+// conservative than the default compiler implementation; more details can be found on
+// <https://github.com/rust-lang/rust/issues/41622> -- it refers to `MutexGuard` from the standard
+// library.
+unsafe impl<L, I> Sync for Guard<'_, L, I>
+where
+ L: Lock<I> + ?Sized,
+ L::Inner: Sync,
+ I: LockInfo,
+{
+}
+
+impl<L: Lock<I> + ?Sized, I: LockInfo> core::ops::Deref for Guard<'_, L, I> {
+ type Target = L::Inner;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &*self.lock.locked_data().get() }
+ }
+}
+
+impl<L: Lock<I> + ?Sized, I: LockInfo<Writable = True>> core::ops::DerefMut for Guard<'_, L, I> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &mut *self.lock.locked_data().get() }
+ }
+}
+
+impl<L: Lock<I> + ?Sized, I: LockInfo> Drop for Guard<'_, L, I> {
+ fn drop(&mut self) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { self.lock.unlock(&mut self.context) };
+ }
+}
+
+impl<'a, L: Lock<I> + ?Sized, I: LockInfo> Guard<'a, L, I> {
+ /// Constructs a new immutable lock guard.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that it owns the lock.
+ pub(crate) unsafe fn new(lock: &'a L, context: L::GuardContext) -> Self {
+ Self { lock, context }
+ }
+}
+
+/// Specifies properties of a lock.
+pub trait LockInfo {
+ /// Determines if the data protected by a lock is writable.
+ type Writable: Bool;
+}
+
+/// A marker for locks that only allow reading.
+pub struct ReadLock;
+impl LockInfo for ReadLock {
+ type Writable = False;
+}
+
+/// A marker for locks that allow reading and writing.
+pub struct WriteLock;
+impl LockInfo for WriteLock {
+ type Writable = True;
+}
+
+/// A generic mutual exclusion primitive.
+///
+/// [`Guard`] is written such that any mutual exclusion primitive that can implement this trait can
+/// also benefit from having an automatic way to unlock itself.
+///
+/// # Safety
+///
+/// - Implementers of this trait with the [`WriteLock`] marker must ensure that only one thread/CPU
+/// may access the protected data once the lock is held, that is, between calls to `lock_noguard`
+/// and `unlock`.
+/// - Implementers of all other markers must ensure that a mutable reference to the protected data
+/// is not active in any thread/CPU because at least one shared reference is active between calls
+/// to `lock_noguard` and `unlock`.
+pub unsafe trait Lock<I: LockInfo = WriteLock> {
+ /// The type of the data protected by the lock.
+ type Inner: ?Sized;
+
+ /// The type of context, if any, that needs to be stored in the guard.
+ type GuardContext;
+
+ /// Acquires the lock, making the caller its owner.
+ #[must_use]
+ fn lock_noguard(&self) -> Self::GuardContext;
+
+ /// Reacquires the lock, making the caller its owner.
+ ///
+ /// The guard context before the last unlock is passed in.
+ ///
+ /// Locks that don't require this state on relock can simply use the default implementation
+ /// that calls [`Lock::lock_noguard`].
+ fn relock(&self, ctx: &mut Self::GuardContext) {
+ *ctx = self.lock_noguard();
+ }
+
+ /// Releases the lock, giving up ownership of the lock.
+ ///
+ /// # Safety
+ ///
+ /// It must only be called by the current owner of the lock.
+ unsafe fn unlock(&self, context: &mut Self::GuardContext);
+
+ /// Returns the data protected by the lock.
+ fn locked_data(&self) -> &core::cell::UnsafeCell<Self::Inner>;
+}
+
+/// A creator of instances of a mutual exclusion (lock) primitive.
+pub trait LockFactory {
+ /// The parametrised type of the mutual exclusion primitive that can be created by this factory.
+ type LockedType<T>;
+
+ /// Constructs a new instance of the mutual exclusion primitive.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`LockIniter::init_lock`] before using the lock.
+ unsafe fn new_lock<T>(data: T) -> Self::LockedType<T>;
+}
+
+/// A lock that can be initialised with a single lock class key.
+pub trait LockIniter {
+ /// Initialises the lock instance so that it can be safely used.
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey);
+}
+
+impl<L: LockIniter> NeedsLockClass for L {
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key: &'static LockClassKey,
+ _: &'static LockClassKey,
+ ) {
+ self.init_lock(name, key);
+ }
+}
diff --git a/rust/kernel/sync/mutex.rs b/rust/kernel/sync/mutex.rs
new file mode 100644
index 000000000000..c40396c15453
--- /dev/null
+++ b/rust/kernel/sync/mutex.rs
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel mutex.
+//!
+//! This module allows Rust code to use the kernel's [`struct mutex`].
+
+use super::{Guard, Lock, LockClassKey, LockFactory, LockIniter, WriteLock};
+use crate::{bindings, str::CStr, types::Opaque};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`Mutex`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! mutex_init {
+ ($mutex:expr, $name:literal) => {
+ $crate::init_with_lockdep!($mutex, $name)
+ };
+}
+
+/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex,
+/// only one at a time is allowed to progress; the others block (sleep) until the mutex is
+/// unlocked, at which point another thread is allowed to wake up and make progress.
+///
+/// A [`Mutex`] must first be initialised with a call to [`Mutex::init_lock`] before it can be
+/// used. The [`mutex_init`] macro is provided to automatically assign a new lock class to a mutex
+/// instance.
+///
+/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts.
+///
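+/// # Examples
+///
+/// A minimal usage sketch; it assumes the mutex lives in a pinned location and was already
+/// initialised with `mutex_init!`:
+///
+/// ```ignore
+/// fn increment(m: &Mutex<u32>) -> u32 {
+///     // The returned guard grants access to the protected data and unlocks on drop.
+///     let mut guard = m.lock();
+///     *guard += 1;
+///     *guard
+/// }
+/// ```
+///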
+/// [`struct mutex`]: ../../../include/linux/mutex.h
+pub struct Mutex<T: ?Sized> {
+ /// The kernel `struct mutex` object.
+ mutex: Opaque<bindings::mutex>,
+
+ /// A mutex needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+
+ /// The data protected by the mutex.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+impl<T> Mutex<T> {
+ /// Constructs a new mutex.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`Mutex::init_lock`] before using the mutex.
+ pub const unsafe fn new(t: T) -> Self {
+ Self {
+ mutex: Opaque::uninit(),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at
+ /// a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self> {
+ let ctx = self.lock_noguard();
+ // SAFETY: The mutex was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for Mutex<T> {
+ type LockedType<U> = Mutex<U>;
+
+ unsafe fn new_lock<U>(data: U) -> Mutex<U> {
+ // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
+ unsafe { Mutex::new(data) }
+ }
+}
+
+impl<T> LockIniter for Mutex<T> {
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey) {
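+        // SAFETY: `mutex` points to valid memory; `name` and `key` are valid because they have
+        // `'static` lifetimes.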
+ unsafe { bindings::__mutex_init(self.mutex.get(), name.as_char_ptr(), key.get()) };
+ }
+}
+
+/// A guard context that carries no data, for locks that need no extra state while held.
+pub struct EmptyGuardContext;
+
+// SAFETY: The underlying kernel `struct mutex` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for Mutex<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `mutex` points to valid memory.
+ unsafe { bindings::mutex_lock(self.mutex.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the mutex is owned by the
+ // caller.
+ unsafe { bindings::mutex_unlock(self.mutex.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+/// A revocable mutex.
+///
+/// That is, a mutex to which access can be revoked at runtime. It is a specialisation of the more
+/// generic [`super::revocable::Revocable`].
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::RevocableMutex;
+/// # use kernel::revocable_init;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn read_sum(v: &RevocableMutex<Example>) -> Option<u32> {
+/// let guard = v.try_write()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// // SAFETY: We call `revocable_init` immediately below.
+/// let mut v = unsafe { RevocableMutex::new(Example { a: 10, b: 20 }) };
+/// // SAFETY: We never move out of `v`.
+/// let pinned = unsafe { Pin::new_unchecked(&mut v) };
+/// revocable_init!(pinned, "example::v");
+/// assert_eq!(read_sum(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(read_sum(&v), None);
+/// ```
+pub type RevocableMutex<T> = super::revocable::Revocable<Mutex<()>, T>;
+
+/// A guard for a revocable mutex.
+pub type RevocableMutexGuard<'a, T, I = WriteLock> =
+ super::revocable::RevocableGuard<'a, Mutex<()>, T, I>;
diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs
new file mode 100644
index 000000000000..1a1c8ea49359
--- /dev/null
+++ b/rust/kernel/sync/rcu.rs
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! RCU support.
+//!
+//! C header: [`include/linux/rcupdate.h`](../../../../include/linux/rcupdate.h)
+
+use crate::bindings;
+use core::marker::PhantomData;
+
+/// Evidence that the RCU read side lock is held on the current thread/CPU.
+///
+/// The type is explicitly not `Send` because this property is per-thread/CPU.
+///
+/// # Invariants
+///
+/// The RCU read side lock is actually held while instances of this guard exist.
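+///
+/// # Examples
+///
+/// A short sketch of bounding an RCU read-side critical section with a guard:
+///
+/// ```ignore
+/// let guard = kernel::sync::rcu::read_lock();
+/// // Read RCU-protected data here; the read-side lock is held while `guard` is alive.
+/// guard.unlock(); // Or simply let `guard` fall out of scope.
+/// ```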
+pub struct Guard {
+ _not_send: PhantomData<*mut ()>,
+}
+
+impl Guard {
+ /// Acquires the RCU read side lock and returns a guard.
+ pub fn new() -> Self {
+ // SAFETY: An FFI call with no additional requirements.
+ unsafe { bindings::rcu_read_lock() };
+ // INVARIANT: The RCU read side lock was just acquired above.
+ Self {
+ _not_send: PhantomData,
+ }
+ }
+
+ /// Explicitly releases the RCU read side lock.
+ pub fn unlock(self) {}
+}
+
+impl Default for Guard {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Drop for Guard {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, the rcu read side is locked, so it is ok to unlock it.
+ unsafe { bindings::rcu_read_unlock() };
+ }
+}
+
+/// Acquires the RCU read side lock.
+pub fn read_lock() -> Guard {
+ Guard::new()
+}
diff --git a/rust/kernel/sync/revocable.rs b/rust/kernel/sync/revocable.rs
new file mode 100644
index 000000000000..db716182a2a8
--- /dev/null
+++ b/rust/kernel/sync/revocable.rs
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Synchronisation primitives where access to their contents can be revoked at runtime.
+
+use crate::{
+ str::CStr,
+ sync::{Guard, Lock, LockClassKey, LockFactory, LockInfo, NeedsLockClass, ReadLock, WriteLock},
+ types::True,
+};
+use core::{
+ mem::MaybeUninit,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+};
+
+/// The state within the revocable synchronisation primitive.
+///
+/// We don't simply use `Option<T>` because we need to drop the contents in place, as they are
+/// implicitly pinned.
+///
+/// # Invariants
+///
+/// The `is_available` field determines if `data` is initialised.
+pub struct Inner<T> {
+ is_available: bool,
+ data: MaybeUninit<T>,
+}
+
+impl<T> Inner<T> {
+ fn new(data: T) -> Self {
+ // INVARIANT: `data` is initialised and `is_available` is `true`, so the state matches.
+ Self {
+ is_available: true,
+ data: MaybeUninit::new(data),
+ }
+ }
+
+ fn drop_in_place(&mut self) {
+ if !self.is_available {
+ // Already dropped.
+ return;
+ }
+
+ // INVARIANT: `data` is being dropped and `is_available` is set to `false`, so the state
+ // matches.
+ self.is_available = false;
+
+ // SAFETY: By the type invariants, `data` is valid because `is_available` was true.
+ unsafe { self.data.assume_init_drop() };
+ }
+}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ self.drop_in_place();
+ }
+}
+
+/// Revocable synchronisation primitive.
+///
+/// That is, it wraps synchronisation primitives so that access to their contents can be revoked at
+/// runtime, rendering them inaccessible.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// For better ergonomics, we advise the use of specialisations of this struct, for example,
+/// [`super::RevocableMutex`]. Callers that do not need to sleep while holding on to a guard
+/// should use [`crate::revocable::Revocable`] instead, which is more efficient as it uses RCU to
+/// keep objects alive.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::{Mutex, Revocable};
+/// # use kernel::revocable_init;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Mutex<()>, Example>) -> Option<u32> {
+/// let mut guard = v.try_write()?;
+/// guard.a += 2;
+/// guard.b += 2;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// // SAFETY: We call `revocable_init` immediately below.
+/// let mut v = unsafe { Revocable::<Mutex<()>, Example>::new(Example { a: 10, b: 20 }) };
+/// // SAFETY: We never move out of `v`.
+/// let pinned = unsafe { Pin::new_unchecked(&mut v) };
+/// revocable_init!(pinned, "example::v");
+/// assert_eq!(add_two(&v), Some(34));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+pub struct Revocable<F: LockFactory, T> {
+ inner: F::LockedType<Inner<T>>,
+}
+
+/// Safely initialises a [`Revocable`] instance with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! revocable_init {
+ ($mutex:expr, $name:literal) => {
+ $crate::init_with_lockdep!($mutex, $name)
+ };
+}
+
+impl<F: LockFactory, T> Revocable<F, T> {
+ /// Creates a new revocable instance of the given lock.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`Revocable::init`] before using the revocable synch primitive.
+ pub unsafe fn new(data: T) -> Self {
+ Self {
+ // SAFETY: The safety requirements of this function require that `Revocable::init`
+ // be called before the returned object can be used. Lock initialisation is called
+ // from `Revocable::init`.
+ inner: unsafe { F::new_lock(Inner::new(data)) },
+ }
+ }
+}
+
+impl<F: LockFactory, T> NeedsLockClass for Revocable<F, T>
+where
+ F::LockedType<Inner<T>>: NeedsLockClass,
+{
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ ) {
+ // SAFETY: `inner` is pinned when `self` is.
+ let inner = unsafe { self.map_unchecked_mut(|r| &mut r.inner) };
+ inner.init(name, key1, key2);
+ }
+}
+
+impl<F: LockFactory, T> Revocable<F, T>
+where
+ F::LockedType<Inner<T>>: Lock<Inner = Inner<T>>,
+{
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Revocation and dropping happen after ongoing accessors complete.
+ pub fn revoke(&self) {
+ self.lock().drop_in_place();
+ }
+
+ /// Tries to lock the \[revocable\] wrapped object in write (exclusive) mode.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. Callers are allowed to sleep while holding on
+ /// to the returned guard.
+ pub fn try_write(&self) -> Option<RevocableGuard<'_, F, T, WriteLock>> {
+ let inner = self.lock();
+ if !inner.is_available {
+ return None;
+ }
+ Some(RevocableGuard::new(inner))
+ }
+
+ fn lock(&self) -> Guard<'_, F::LockedType<Inner<T>>> {
+ let ctx = self.inner.lock_noguard();
+ // SAFETY: The lock was acquired in the call above.
+ unsafe { Guard::new(&self.inner, ctx) }
+ }
+}
+
+impl<F: LockFactory, T> Revocable<F, T>
+where
+ F::LockedType<Inner<T>>: Lock<ReadLock, Inner = Inner<T>>,
+{
+ /// Tries to lock the \[revocable\] wrapped object in read (shared) mode.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. Callers are allowed to sleep while holding on
+ /// to the returned guard.
+ pub fn try_read(&self) -> Option<RevocableGuard<'_, F, T, ReadLock>> {
+ let ctx = self.inner.lock_noguard();
+ // SAFETY: The lock was acquired in the call above.
+ let inner = unsafe { Guard::new(&self.inner, ctx) };
+ if !inner.is_available {
+ return None;
+ }
+ Some(RevocableGuard::new(inner))
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+pub struct RevocableGuard<'a, F: LockFactory, T, I: LockInfo>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ guard: Guard<'a, F::LockedType<Inner<T>>, I>,
+}
+
+impl<'a, F: LockFactory, T, I: LockInfo> RevocableGuard<'a, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ fn new(guard: Guard<'a, F::LockedType<Inner<T>>, I>) -> Self {
+ Self { guard }
+ }
+}
+
+impl<F: LockFactory, T, I: LockInfo<Writable = True>> RevocableGuard<'_, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ /// Returns a pinned mutable reference to the wrapped object.
+ pub fn as_pinned_mut(&mut self) -> Pin<&mut T> {
+ // SAFETY: Revocable mutexes must be pinned, so we choose to always project the data as
+ // pinned as well (i.e., we guarantee we never move it).
+ unsafe { Pin::new_unchecked(&mut *self) }
+ }
+}
+
+impl<F: LockFactory, T, I: LockInfo> Deref for RevocableGuard<'_, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The guard is only created while the wrapped object is still available, and it
+ // holds the lock, so `data` is initialised and cannot be dropped concurrently.
+ unsafe { &*self.guard.data.as_ptr() }
+ }
+}
+
+impl<F: LockFactory, T, I: LockInfo<Writable = True>> DerefMut for RevocableGuard<'_, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: As in `deref` above, the object is still available and the lock is held in
+ // write mode, so `data` is initialised and valid for writes.
+ unsafe { &mut *self.guard.data.as_mut_ptr() }
+ }
+}
diff --git a/rust/kernel/sync/smutex.rs b/rust/kernel/sync/smutex.rs
new file mode 100644
index 000000000000..6cf92260ace6
--- /dev/null
+++ b/rust/kernel/sync/smutex.rs
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A simple mutex implementation.
+//!
+//! Differently from [`super::Mutex`], this implementation does not require pinning, so the
+//! ergonomics are much improved, though the implementation is not as feature-rich as the C-based
+//! one. The main advantage is that it doesn't impose unsafe blocks on callers.
+//!
+//! The mutex is made up of 2 words in addition to the data it protects. The first one is accessed
+//! concurrently by threads trying to acquire and release the mutex; it contains a "stack" of
+//! waiters and a "locked" bit. The second one is only accessible by the thread holding the mutex;
+//! it contains a queue of waiters. Waiters are moved from the stack to the queue when the mutex is
+//! next unlocked while the stack is non-empty and the queue is empty. A single waiter is popped
+//! from the wait queue when the owner of the mutex unlocks it.
+//!
+//! The initial state of the mutex is `<locked=0, stack=[], queue=[]>`, meaning that it isn't
+//! locked and both the waiter stack and queue are empty.
+//!
+//! A lock operation transitions the mutex to state `<locked=1, stack=[], queue=[]>`.
+//!
+//! An unlock operation transitions the mutex back to the initial state. However, an attempt to
+//! lock the mutex while it's already locked results in a waiter being created (on the contending
+//! thread's stack) and pushed onto the mutex's waiter stack, so the state is
+//! `<locked=1, stack=[W1], queue=[]>`.
+//!
+//! Another thread trying to lock the mutex results in another waiter being pushed onto the stack,
+//! so the state becomes `<locked=1, stack=[W2, W1], queue=[]>`.
+//!
+//! In such states (queue is empty but stack is non-empty), the unlock operation is performed in
+//! three steps:
+//! 1. The stack is popped (but the mutex remains locked), so the state is:
+//! `<locked=1, stack=[], queue=[]>`
+//! 2. The stack is turned into a queue by reversing it, so the state is:
+//! `<locked=1, stack=[], queue=[W1, W2]>`
+//! 3. Finally, the lock is released, and the first waiter is awakened, so the state is:
+//! `<locked=0, stack=[], queue=[W2]>`
+//!
+//! The mutex remains accessible to any threads attempting to lock it in any of the intermediate
+//! states above. For example, while it is locked, other threads may add waiters to the stack
+//! (which is ok because we want to release the ones on the queue first); another example is that
+//! another thread may acquire the mutex before waiter W1 in the example above, this makes the
+//! mutex unfair but this is desirable because the thread is running already and may in fact
+//! release the lock before W1 manages to get scheduled -- it also mitigates the lock convoy
+//! problem when the releasing thread wants to immediately acquire the lock again: it will be
+//! allowed to do so (as long as W1 doesn't get to it first).
+//!
+//! When the waiter queue is non-empty, unlocking the mutex always results in the first waiter being
+//! popped from the queue and awakened.
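+//!
+//! As a minimal illustration (plain Rust, not kernel code), the stack-to-queue reversal in
+//! step 2 above can be modelled with a `Vec` standing in for the intrusive waiter list:
+//!
+//! ```ignore
+//! // Waiters are pushed in LIFO order, so the stack reads [W2, W1].
+//! let stack = vec!["W2", "W1"];
+//! // Reversing the stack yields the FIFO wake-up order used by the queue.
+//! let queue: Vec<_> = stack.into_iter().rev().collect();
+//! assert_eq!(queue, ["W1", "W2"]);
+//! ```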
+
+use super::{mutex::EmptyGuardContext, Guard, Lock, LockClassKey, LockFactory, LockIniter};
+use crate::{bindings, str::CStr, types::Opaque};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{cell::UnsafeCell, pin::Pin};
+
+/// The value that is OR'd into the [`Mutex::waiter_stack`] when the mutex is locked.
+const LOCKED: usize = 1;
+
+/// A simple mutex.
+///
+/// This is a mutual-exclusion primitive. It guarantees that only one thread at a time may access
+/// the data it protects. When multiple threads attempt to lock the same mutex, only one at a time
+/// is allowed to progress; the others will block (sleep) until the mutex is unlocked, at which
+/// point another thread will be allowed to wake up and make progress.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{Result, sync::Arc, sync::smutex::Mutex};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// static EXAMPLE: Mutex<Example> = Mutex::new(Example { a: 10, b: 20 });
+///
+/// fn inc_a(example: &Mutex<Example>) {
+/// let mut guard = example.lock();
+/// guard.a += 1;
+/// }
+///
+/// fn sum(example: &Mutex<Example>) -> u32 {
+/// let guard = example.lock();
+/// guard.a + guard.b
+/// }
+///
+/// fn try_new(a: u32, b: u32) -> Result<Arc<Mutex<Example>>> {
+/// Arc::try_new(Mutex::new(Example { a, b }))
+/// }
+///
+/// assert_eq!(EXAMPLE.lock().a, 10);
+/// assert_eq!(sum(&EXAMPLE), 30);
+///
+/// inc_a(&EXAMPLE);
+///
+/// assert_eq!(EXAMPLE.lock().a, 11);
+/// assert_eq!(sum(&EXAMPLE), 31);
+///
+/// # try_new(42, 43);
+/// ```
+pub struct Mutex<T: ?Sized> {
+ /// A stack of waiters.
+ ///
+ /// It is accessed atomically by threads locking/unlocking the mutex. Additionally, the
+ /// least-significant bit is used to indicate whether the mutex is locked or not.
+ waiter_stack: AtomicUsize,
+
+ /// A queue of waiters.
+ ///
+ /// This is only accessible to the holder of the mutex. When the owner of the mutex is
+ /// unlocking it, it will move waiters from the stack to the queue when the queue is empty and
+ /// the stack non-empty.
+ waiter_queue: UnsafeCell<*mut Waiter>,
+
+ /// The data protected by the mutex.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+impl<T> Mutex<T> {
+ /// Creates a new instance of the mutex.
+ pub const fn new(data: T) -> Self {
+ Self {
+ waiter_stack: AtomicUsize::new(0),
+ waiter_queue: UnsafeCell::new(core::ptr::null_mut()),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at
+ /// a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self> {
+ let ctx = self.lock_noguard();
+ // SAFETY: The mutex was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for Mutex<T> {
+ type LockedType<U> = Mutex<U>;
+
+ unsafe fn new_lock<U>(data: U) -> Mutex<U> {
+ Mutex::new(data)
+ }
+}
+
+impl<T> LockIniter for Mutex<T> {
+ fn init_lock(self: Pin<&mut Self>, _name: &'static CStr, _key: &'static LockClassKey) {}
+}
+
+// SAFETY: The mutex implementation ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for Mutex<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ loop {
+ // Try the fast path: the caller owns the mutex if we manage to set the `LOCKED` bit.
+ //
+ // The `acquire` order matches with one of the `release` ones in `unlock`.
+ if self.waiter_stack.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
+ return EmptyGuardContext;
+ }
+
+ // Slow path: we'll likely need to wait, so initialise a local waiter struct.
+ let mut waiter = Waiter {
+ completion: Opaque::uninit(),
+ next: core::ptr::null_mut(),
+ };
+
+ // SAFETY: The completion object was just allocated on the stack and is valid for
+ // writes.
+ unsafe { bindings::init_completion(waiter.completion.get()) };
+
+ // Try to enqueue the waiter by pushing it onto the waiter stack. We want to do it
+ // only while the mutex is locked by another thread.
+ loop {
+ // We use relaxed here because we're just reading the value we'll CAS later (which
+ // has a stronger ordering on success).
+ let mut v = self.waiter_stack.load(Ordering::Relaxed);
+ if v & LOCKED == 0 {
+ // The mutex was released by another thread, so try to acquire it.
+ //
+ // The `acquire` order matches with one of the `release` ones in `unlock`.
+ v = self.waiter_stack.fetch_or(LOCKED, Ordering::Acquire);
+ if v & LOCKED == 0 {
+ return EmptyGuardContext;
+ }
+ }
+
+ waiter.next = (v & !LOCKED) as _;
+
+ // The `release` order matches with `acquire` in `unlock` when the stack is swapped
+ // out. We use release order here to ensure that the other thread can see our
+ // waiter fully initialised.
+ if self
+ .waiter_stack
+ .compare_exchange(
+ v,
+ (&mut waiter as *mut _ as usize) | LOCKED,
+ Ordering::Release,
+ Ordering::Relaxed,
+ )
+ .is_ok()
+ {
+ break;
+ }
+ }
+
+ // Wait for the owner of the lock to wake this thread up.
+ //
+ // SAFETY: Completion object was previously initialised with `init_completion` and
+ // remains valid.
+ unsafe { bindings::wait_for_completion(waiter.completion.get()) };
+ }
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The caller owns the mutex, so it is safe to manipulate the local wait queue.
+ let mut waiter = unsafe { *self.waiter_queue.get() };
+ loop {
+ // If we have a non-empty local queue of waiters, pop the first one, release the mutex,
+ // and wake it up (the popped waiter).
+ if !waiter.is_null() {
+ // SAFETY: The caller owns the mutex, so it is safe to manipulate the local wait
+ // queue.
+ unsafe { *self.waiter_queue.get() = (*waiter).next };
+
+ // The `release` order matches with one of the `acquire` ones in `lock_noguard`.
+ self.waiter_stack.fetch_and(!LOCKED, Ordering::Release);
+
+ // Wake up the first waiter.
+ //
+ // SAFETY: The completion object was initialised before being added to the wait
+ // stack and is only removed above, just before being completed here. So it is
+ // valid for writes.
+ unsafe { bindings::complete_all((*waiter).completion.get()) };
+ return;
+ }
+
+ // Try the fast path when there are no local waiters.
+ //
+ // The `release` order matches with one of the `acquire` ones in `lock_noguard`.
+ if self
+ .waiter_stack
+ .compare_exchange(LOCKED, 0, Ordering::Release, Ordering::Relaxed)
+ .is_ok()
+ {
+ return;
+ }
+
+ // We don't have a local queue, so pull the whole stack off, reverse it, and use it as a
+ // local queue. Since we're manipulating this queue, we need to keep ownership of the
+ // mutex.
+ //
+ // The `acquire` order matches with the `release` one in `lock_noguard` where a waiter
+ // is pushed onto the stack. It ensures that we see the fully-initialised waiter.
+ let mut stack =
+ (self.waiter_stack.swap(LOCKED, Ordering::Acquire) & !LOCKED) as *mut Waiter;
+ while !stack.is_null() {
+ // SAFETY: The caller still owns the mutex, so it is safe to manipulate the
+ // elements of the wait stack, which will soon become the wait queue.
+ let next = unsafe { (*stack).next };
+
+ // SAFETY: Same as above.
+ unsafe { (*stack).next = waiter };
+
+ waiter = stack;
+ stack = next;
+ }
+ }
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+struct Waiter {
+ completion: Opaque<bindings::completion>,
+ next: *mut Waiter,
+}
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
new file mode 100644
index 000000000000..881f45492268
--- /dev/null
+++ b/rust/kernel/time.rs
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Timekeeping functions.
+//!
+//! C header: [`include/linux/ktime.h`](../../../../include/linux/ktime.h)
+//! C header: [`include/linux/timekeeping.h`](../../../../include/linux/timekeeping.h)
+
+use crate::bindings;
+use core::time::Duration;
+
+/// Returns the kernel time elapsed since boot, excluding time spent sleeping, as a [`Duration`].
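+///
+/// For instance, elapsed time can be measured by subtracting two readings (a hypothetical
+/// usage sketch; [`Duration`] supports subtraction):
+///
+/// ```ignore
+/// let t0 = kernel::time::ktime_get();
+/// // ... do some work ...
+/// let elapsed = kernel::time::ktime_get() - t0;
+/// ```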
+pub fn ktime_get() -> Duration {
+ // SAFETY: `ktime_get()` is always safe to call.
+ Duration::from_nanos(unsafe { bindings::ktime_get() }.try_into().unwrap())
+}
+
+/// Returns the kernel time elapsed since boot, including time spent sleeping, as a [`Duration`].
+pub fn ktime_get_boottime() -> Duration {
+ Duration::from_nanos(
+ // SAFETY: `ktime_get_with_offset()` is always safe to call.
+ unsafe { bindings::ktime_get_with_offset(bindings::tk_offsets_TK_OFFS_BOOT) }
+ .try_into()
+ .unwrap(),
+ )
+}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index e84e51ec9716..6c49adf161c3 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -2,7 +2,220 @@
//! Kernel types.
-use core::{cell::UnsafeCell, mem::MaybeUninit};
+use alloc::boxed::Box;
+use core::{
+ cell::UnsafeCell,
+ mem::MaybeUninit,
+ ops::{Deref, DerefMut},
+};
+
+/// Used to transfer ownership to and from foreign (non-Rust) languages.
+///
+/// Ownership is transferred from Rust to a foreign language by calling [`Self::into_foreign`] and
+/// later may be transferred back to Rust by calling [`Self::from_foreign`].
+///
+/// This trait is meant to be used in cases when Rust objects are stored in C objects and
+/// eventually "freed" back to Rust.
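+///
+/// A sketch of the intended round trip, assuming the `Box<T>` implementation below
+/// (illustrative only; error handling elided):
+///
+/// ```ignore
+/// // Rust owns the value, then hands it to C as an opaque pointer.
+/// let ptr = Box::try_new(42u32)?.into_foreign();
+/// // ... C code stores `ptr` and later hands it back ...
+/// // SAFETY: `ptr` came from `into_foreign` and is consumed exactly once.
+/// let value = unsafe { <Box<u32> as ForeignOwnable>::from_foreign(ptr) };
+/// ```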
+pub trait ForeignOwnable: Sized {
+ /// Type of values borrowed between calls to [`ForeignOwnable::into_foreign`] and
+ /// [`ForeignOwnable::from_foreign`].
+ type Borrowed<'a>;
+
+ /// Converts a Rust-owned object to a foreign-owned one.
+ ///
+ /// The foreign representation is a pointer to void.
+ fn into_foreign(self) -> *const core::ffi::c_void;
+
+ /// Borrows a foreign-owned object.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
+ /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
+ /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow_mut`]
+ /// for this object must have been dropped.
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>;
+
+ /// Mutably borrows a foreign-owned object.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
+ /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
+ /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and
+ /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped.
+ unsafe fn borrow_mut(ptr: *const core::ffi::c_void) -> ScopeGuard<Self, fn(Self)> {
+ // SAFETY: The safety requirements ensure that `ptr` came from a previous call to
+ // `into_foreign`.
+ ScopeGuard::new_with_data(unsafe { Self::from_foreign(ptr) }, |d| {
+ d.into_foreign();
+ })
+ }
+
+ /// Converts a foreign-owned object back to a Rust-owned one.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
+ /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
+ /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and
+ /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped.
+ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self;
+}
+
+impl<T: 'static> ForeignOwnable for Box<T> {
+ type Borrowed<'a> = &'a T;
+
+ fn into_foreign(self) -> *const core::ffi::c_void {
+ Box::into_raw(self) as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements of `from_foreign` also ensure that the object remains alive for
+ // the lifetime of the returned value.
+ unsafe { &*ptr.cast() }
+ }
+
+ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ unsafe { Box::from_raw(ptr as _) }
+ }
+}
+
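+// `()` carries no information, so nothing is allocated or freed: any non-null, well-aligned
+// pointer value works as the foreign representation, and `NonNull::dangling()` provides one.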
+impl ForeignOwnable for () {
+ type Borrowed<'a> = ();
+
+ fn into_foreign(self) -> *const core::ffi::c_void {
+ core::ptr::NonNull::dangling().as_ptr()
+ }
+
+ unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {}
+
+ unsafe fn from_foreign(_: *const core::ffi::c_void) -> Self {}
+}
+
+/// Runs a cleanup function/closure when dropped.
+///
+/// The [`ScopeGuard::dismiss`] function prevents the cleanup function from running.
+///
+/// # Examples
+///
+/// In the example below, we have multiple exit paths and we want to log regardless of which one is
+/// taken:
+/// ```
+/// # use kernel::ScopeGuard;
+/// fn example1(arg: bool) {
+/// let _log = ScopeGuard::new(|| pr_info!("example1 completed\n"));
+///
+/// if arg {
+/// return;
+/// }
+///
+/// pr_info!("Do something...\n");
+/// }
+///
+/// # example1(false);
+/// # example1(true);
+/// ```
+///
+/// In the example below, we want to log the same message on all early exits but a different one on
+/// the main exit path:
+/// ```
+/// # use kernel::ScopeGuard;
+/// fn example2(arg: bool) {
+/// let log = ScopeGuard::new(|| pr_info!("example2 returned early\n"));
+///
+/// if arg {
+/// return;
+/// }
+///
+/// // (Other early returns...)
+///
+/// log.dismiss();
+/// pr_info!("example2 no early return\n");
+/// }
+///
+/// # example2(false);
+/// # example2(true);
+/// ```
+///
+/// In the example below, we need a mutable object (the vector) to be accessible within the log
+/// function, so we wrap it in the [`ScopeGuard`]:
+/// ```
+/// # use kernel::ScopeGuard;
+/// fn example3(arg: bool) -> Result {
+/// let mut vec =
+/// ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
+///
+/// vec.try_push(10u8)?;
+/// if arg {
+/// return Ok(());
+/// }
+/// vec.try_push(20u8)?;
+/// Ok(())
+/// }
+///
+/// # assert_eq!(example3(false), Ok(()));
+/// # assert_eq!(example3(true), Ok(()));
+/// ```
+///
+/// # Invariants
+///
+/// The value stored in the struct is nearly always `Some(_)`, except between
+/// [`ScopeGuard::dismiss`] and [`ScopeGuard::drop`]: in this case, it will be `None` as the value
+/// will have been returned to the caller. Since [`ScopeGuard::dismiss`] consumes the guard,
+/// callers won't be able to use it anymore.
+pub struct ScopeGuard<T, F: FnOnce(T)>(Option<(T, F)>);
+
+impl<T, F: FnOnce(T)> ScopeGuard<T, F> {
+ /// Creates a new guarded object wrapping the given data and with the given cleanup function.
+ pub fn new_with_data(data: T, cleanup_func: F) -> Self {
+ // INVARIANT: The struct is being initialised with `Some(_)`.
+ Self(Some((data, cleanup_func)))
+ }
+
+ /// Prevents the cleanup function from running and returns the guarded data.
+ pub fn dismiss(mut self) -> T {
+ // INVARIANT: This is the exception case in the invariant; it is not visible to callers
+ // because this function consumes `self`.
+ self.0.take().unwrap().0
+ }
+}
+
+impl ScopeGuard<(), fn(())> {
+ /// Creates a new guarded object with the given cleanup function.
+ pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> {
+ ScopeGuard::new_with_data((), move |_| cleanup())
+ }
+}
+
+impl<T, F: FnOnce(T)> Deref for ScopeGuard<T, F> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // The type invariants guarantee that `unwrap` will succeed.
+ &self.0.as_ref().unwrap().0
+ }
+}
+
+impl<T, F: FnOnce(T)> DerefMut for ScopeGuard<T, F> {
+ fn deref_mut(&mut self) -> &mut T {
+ // The type invariants guarantee that `unwrap` will succeed.
+ &mut self.0.as_mut().unwrap().0
+ }
+}
+
+impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
+ fn drop(&mut self) {
+ // Run the cleanup function if one is still present.
+ if let Some((data, cleanup)) = self.0.take() {
+ cleanup(data)
+ }
+ }
+}
/// Stores an opaque value.
///
@@ -35,3 +248,84 @@ pub enum Either<L, R> {
/// Constructs an instance of [`Either`] containing a value of type `R`.
Right(R),
}
+
+/// A trait for boolean types.
+///
+/// This is meant to be used in type states to allow boolean constraints in implementation blocks.
+/// In the example below, the implementation containing `MyType::set_value` could _not_ be
+/// constrained to type states containing `Writable = true` if `Writable` were a constant instead
+/// of a type.
+///
+/// # Safety
+///
+/// No additional implementations of [`Bool`] should be provided, as [`True`] and [`False`] are
+/// already provided.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{Bool, False, True};
+/// use core::marker::PhantomData;
+///
+/// // Type state specifies whether the type is writable.
+/// trait MyTypeState {
+/// type Writable: Bool;
+/// }
+///
+/// // In state S1, the type is writable.
+/// struct S1;
+/// impl MyTypeState for S1 {
+/// type Writable = True;
+/// }
+///
+/// // In state S2, the type is not writable.
+/// struct S2;
+/// impl MyTypeState for S2 {
+/// type Writable = False;
+/// }
+///
+/// struct MyType<T: MyTypeState> {
+/// value: u32,
+/// _p: PhantomData<T>,
+/// }
+///
+/// impl<T: MyTypeState> MyType<T> {
+/// fn new(value: u32) -> Self {
+/// Self {
+/// value,
+/// _p: PhantomData,
+/// }
+/// }
+/// }
+///
+/// // This implementation block only applies if the type state is writable.
+/// impl<T> MyType<T>
+/// where
+/// T: MyTypeState<Writable = True>,
+/// {
+/// fn set_value(&mut self, v: u32) {
+/// self.value = v;
+/// }
+/// }
+///
+/// let mut x = MyType::<S1>::new(10);
+/// let mut y = MyType::<S2>::new(20);
+///
+/// x.set_value(30);
+///
+/// // The code below fails to compile because `S2` is not writable.
+/// // y.set_value(40);
+/// ```
+pub unsafe trait Bool {}
+
+/// Represents the `true` value for types with a [`Bool`] bound.
+pub struct True;
+
+// SAFETY: This is one of the only two implementations of `Bool`.
+unsafe impl Bool for True {}
+
+/// Represents the `false` value for types with a [`Bool`] bound.
+pub struct False;
+
+// SAFETY: This is one of the only two implementations of `Bool`.
+unsafe impl Bool for False {}
diff --git a/rust/kernel/user_ptr.rs b/rust/kernel/user_ptr.rs
new file mode 100644
index 000000000000..084535675c4a
--- /dev/null
+++ b/rust/kernel/user_ptr.rs
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! User pointers.
+//!
+//! C header: [`include/linux/uaccess.h`](../../../../include/linux/uaccess.h)
+
+use crate::{
+ bindings,
+ error::code::*,
+ error::Result,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+};
+use alloc::vec::Vec;
+
+/// A reference to an area in userspace memory, which can be either
+/// read-only or read-write.
+///
+/// All methods on this struct are safe: invalid pointers return
+/// `EFAULT`. Concurrent access, *including data races to/from userspace
+/// memory*, is permitted, because fundamentally another userspace
+/// thread/process could always be modifying memory at the same time
+/// (in the same way that userspace Rust's [`std::io`] permits data races
+/// with the contents of files on disk). In the presence of a race, the
+/// exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data
+/// after completing a read, and not expect that multiple reads of the
+/// same address will return the same value.
+///
+/// All APIs enforce the invariant that a given byte of memory from userspace
+/// may only be read once. By preventing double-fetches we avoid TOCTOU
+/// vulnerabilities. This is accomplished by taking `self` by value to prevent
+/// obtaining multiple readers on a given [`UserSlicePtr`], and the readers
+/// only permitting forward reads.
+///
+/// Constructing a [`UserSlicePtr`] performs no checks on the provided
+/// address and length, it can safely be constructed inside a kernel thread
+/// with no current userspace process. Reads and writes wrap the kernel APIs
+/// `copy_from_user` and `copy_to_user`, which check the memory map of the
+/// current process and enforce that the address range is within the user
+/// range (no additional calls to `access_ok` are needed).
+///
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
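+///
+/// # Examples
+///
+/// A hedged sketch of reading a `u64` passed from userspace (the surrounding ioctl plumbing
+/// is hypothetical):
+///
+/// ```ignore
+/// fn read_u64_arg(arg: usize) -> Result<u64> {
+///     // SAFETY: A single `UserSlicePtr` is created for this argument, so each byte is
+///     // read at most once and no double-fetch can occur.
+///     let ptr = unsafe { UserSlicePtr::new(arg as *mut core::ffi::c_void, 8) };
+///     let mut reader = ptr.reader();
+///     let mut buf = [0u8; 8];
+///     reader.read_slice(&mut buf)?;
+///     Ok(u64::from_ne_bytes(buf))
+/// }
+/// ```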
+pub struct UserSlicePtr(*mut core::ffi::c_void, usize);
+
+impl UserSlicePtr {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// # Safety
+ ///
+ /// Callers must be careful to avoid time-of-check-time-of-use
+ /// (TOCTOU) issues. The simplest way is to create a single instance of
+ /// [`UserSlicePtr`] per user memory block as it reads each byte at
+ /// most once.
+ pub unsafe fn new(ptr: *mut core::ffi::c_void, length: usize) -> Self {
+ UserSlicePtr(ptr, length)
+ }
+
+ /// Reads the entirety of the user slice.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to
+ /// mapped, readable memory.
+ pub fn read_all(self) -> Result<Vec<u8>> {
+ self.reader().read_all()
+ }
+
+ /// Constructs a [`UserSlicePtrReader`].
+ pub fn reader(self) -> UserSlicePtrReader {
+ UserSlicePtrReader(self.0, self.1)
+ }
+
+ /// Writes the provided slice into the user slice.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to
+ /// mapped, writable memory (in which case some data from before the
+ /// fault may be written), or `data` is larger than the user slice
+ /// (in which case no data is written).
+ pub fn write_all(self, data: &[u8]) -> Result {
+ self.writer().write_slice(data)
+ }
+
+ /// Constructs a [`UserSlicePtrWriter`].
+ pub fn writer(self) -> UserSlicePtrWriter {
+ UserSlicePtrWriter(self.0, self.1)
+ }
+
+ /// Constructs both a [`UserSlicePtrReader`] and a [`UserSlicePtrWriter`].
+ pub fn reader_writer(self) -> (UserSlicePtrReader, UserSlicePtrWriter) {
+ (
+ UserSlicePtrReader(self.0, self.1),
+ UserSlicePtrWriter(self.0, self.1),
+ )
+ }
+}
+
+/// A reader for [`UserSlicePtr`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSlicePtrReader(*mut core::ffi::c_void, usize);
+
+impl IoBufferReader for UserSlicePtrReader {
+ /// Returns the number of bytes left to be read from this reader.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ fn len(&self) -> usize {
+ self.1
+ }
+
+ /// Reads raw data from the user slice into a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The output buffer must be valid for writes of at least `len` bytes.
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result {
+ if len > self.1 || len > u32::MAX as usize {
+ return Err(EFAULT);
+ }
+ // SAFETY: The safety requirements of this function ensure that `out` is valid for
+ // writes of `len` bytes; `copy_from_user` handles faulting userspace addresses.
+ let res = unsafe { bindings::copy_from_user(out as _, self.0, len as _) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ // Since this is not a pointer to a valid object in our program,
+ // we cannot use `add`, which has C-style rules for defined
+ // behavior.
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlicePtr`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSlicePtrWriter(*mut core::ffi::c_void, usize);
+
+impl IoBufferWriter for UserSlicePtrWriter {
+ fn len(&self) -> usize {
+ self.1
+ }
+
+ fn clear(&mut self, mut len: usize) -> Result {
+ let mut ret = Ok(());
+ if len > self.1 {
+ ret = Err(EFAULT);
+ len = self.1;
+ }
+
+ // SAFETY: The buffer will be validated by `clear_user`. We ensure that `len` is within
+ // bounds in the check above.
+ let left = unsafe { bindings::clear_user(self.0, len as _) } as usize;
+ if left != 0 {
+ ret = Err(EFAULT);
+ len -= left;
+ }
+
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ ret
+ }
+
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result {
+ if len > self.1 || len > u32::MAX as usize {
+ return Err(EFAULT);
+ }
+ // SAFETY: The safety requirements of this function ensure that `data` is valid for
+ // reads of `len` bytes; `copy_to_user` handles faulting userspace addresses.
+ let res = unsafe { bindings::copy_to_user(self.0, data as _, len as _) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ // Since this is not a pointer to a valid object in our program,
+ // we cannot use `add`, which has C-style rules for defined
+ // behavior.
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ Ok(())
+ }
+}
diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
new file mode 100644
index 000000000000..4f09e8e613da
--- /dev/null
+++ b/rust/kernel/xarray.rs
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! XArray abstraction.
+//!
+//! C header: [`include/linux/xarray.h`](../../../../include/linux/xarray.h)
+
+use crate::{
+ bindings,
+ error::{Error, Result},
+ types::{ForeignOwnable, Opaque, ScopeGuard},
+};
+use core::{marker::PhantomData, ops::Deref};
+
+/// Flags passed to `XArray::new` to configure the `XArray`.
+type Flags = bindings::gfp_t;
+
+/// Flag values passed to `XArray::new` to configure the `XArray`.
+pub mod flags {
+ /// Use IRQ-safe locking
+ pub const LOCK_IRQ: super::Flags = bindings::BINDINGS_XA_FLAGS_LOCK_IRQ;
+ /// Use softirq-safe locking
+ pub const LOCK_BH: super::Flags = bindings::BINDINGS_XA_FLAGS_LOCK_BH;
+ /// Track which entries are free (distinct from None)
+ pub const TRACK_FREE: super::Flags = bindings::BINDINGS_XA_FLAGS_TRACK_FREE;
+ /// Initialize array index 0 as busy
+ pub const ZERO_BUSY: super::Flags = bindings::BINDINGS_XA_FLAGS_ZERO_BUSY;
+ /// Use GFP_ACCOUNT for internal memory allocations
+ pub const ACCOUNT: super::Flags = bindings::BINDINGS_XA_FLAGS_ACCOUNT;
+ /// Create an allocating `XArray` starting at index 0
+ pub const ALLOC: super::Flags = bindings::BINDINGS_XA_FLAGS_ALLOC;
+ /// Create an allocating `XArray` starting at index 1
+ pub const ALLOC1: super::Flags = bindings::BINDINGS_XA_FLAGS_ALLOC1;
+}
+
+/// Wrapper for a value owned by the `XArray` which holds the `XArray` lock until dropped.
+///
+/// # Invariants
+///
+/// The `*mut T` is always non-NULL and owned by the referenced `XArray`.
+pub struct Guard<'a, T: ForeignOwnable>(*mut T, &'a Opaque<bindings::xarray>);
+
+impl<'a, T: ForeignOwnable> Guard<'a, T> {
+ /// Borrows the underlying value wrapped by the `Guard`.
+ ///
+ /// Returns a `T::Borrowed` type for the owned `ForeignOwnable` type.
+ pub fn borrow<'b>(&'b self) -> T::Borrowed<'b>
+ where
+ 'a: 'b,
+ {
+ // SAFETY: The value is owned by the `XArray`, and the lifetime for which it is
+ // borrowed cannot outlive the `XArray` itself, nor the `Guard` that holds the lock
+ // and thus ensures the value remains in the `XArray`.
+ unsafe { T::borrow(self.0 as _) }
+ }
+}
+
+// Convenience impl for `ForeignOwnable` types whose `Borrowed`
+// form implements Deref.
+impl<'a, T: ForeignOwnable> Deref for Guard<'a, T>
+where
+ T::Borrowed<'static>: Deref,
+{
+ type Target = <T::Borrowed<'static> as Deref>::Target;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: See the `borrow()` method. The dereferenced `T::Borrowed` value
+ // must share the same lifetime, so we can return a reference to it.
+ // TODO: Is this really sound?
+ unsafe { &*(T::borrow(self.0 as _).deref() as *const _) }
+ }
+}
+
+impl<'a, T: ForeignOwnable> Drop for Guard<'a, T> {
+ fn drop(&mut self) {
+ // SAFETY: The XArray we have a reference to owns the C xarray object.
+ unsafe { bindings::xa_unlock(self.1.get()) };
+ }
+}
+
+/// Represents a reserved slot in an `XArray`, which does not yet have a value but has an assigned
+/// index and may not be allocated by any other user. If the Reservation is dropped without
+/// being filled, the entry is marked as available again.
+///
+/// Users must ensure that reserved slots are not filled by other mechanisms; otherwise their
+/// contents may be dropped and replaced (which will print a warning).
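+///
+/// A hedged usage sketch (`arr` and `value` are hypothetical):
+///
+/// ```ignore
+/// let res = arr.reserve()?;  // Index allocated; the slot is still empty.
+/// let index = res.index();
+/// // ... initialise the object so that it knows its own `index` ...
+/// res.store(value)?;         // Slot filled; the reservation is consumed.
+/// ```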
+pub struct Reservation<'a, T: ForeignOwnable>(&'a XArray<T>, usize, PhantomData<T>);
+
+impl<'a, T: ForeignOwnable> Reservation<'a, T> {
+ /// Stores a value into the reserved slot.
+ pub fn store(self, value: T) -> Result<usize> {
+ if self.0.replace(self.1, value)?.is_some() {
+ crate::pr_err!("XArray: Reservation stored but the entry already had data!\n");
+ // Consider it a success anyway, not much we can do
+ }
+ let index = self.1;
+ core::mem::forget(self);
+ Ok(index)
+ }
+
+ /// Returns the index of this reservation.
+ pub fn index(&self) -> usize {
+ self.1
+ }
+}
+
+impl<'a, T: ForeignOwnable> Drop for Reservation<'a, T> {
+ fn drop(&mut self) {
+ if self.0.remove(self.1).is_some() {
+ crate::pr_err!("XArray: Reservation dropped but the entry was not empty!\n");
+ }
+ }
+}
+
+/// An array which efficiently maps sparse integer indices to owned objects.
+///
+/// This is similar to a `Vec<Option<T>>`, but more efficient when there are holes in the
+/// index space, and can be efficiently grown.
+///
+/// This structure is often expected to be used with an inner type that can be efficiently
+/// cloned, such as an `Arc<T>`.
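+///
+/// # Examples
+///
+/// A hedged sketch of the basic operations (assuming a `Box<u32>` payload; error handling
+/// elided):
+///
+/// ```ignore
+/// let arr: XArray<Box<u32>> = XArray::new(flags::ALLOC)?;
+/// let index = arr.alloc(Some(Box::try_new(42)?))?;
+/// if let Some(guard) = arr.get(index) {
+///     assert_eq!(*guard.borrow(), 42);
+/// } // The guard is dropped here, releasing the `XArray` lock.
+/// let value = arr.remove(index); // Back to an owned `Option<Box<u32>>`.
+/// ```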
+pub struct XArray<T: ForeignOwnable> {
+ xa: Opaque<bindings::xarray>,
+ _p: PhantomData<T>,
+}
+
+impl<T: ForeignOwnable> XArray<T> {
+ /// Creates a new `XArray` with the given flags.
+ pub fn new(flags: Flags) -> Result<XArray<T>> {
+ let xa = Opaque::uninit();
+
+ // SAFETY: We have just created `xa`. This data structure does not require
+ // pinning.
+ unsafe { bindings::xa_init_flags(xa.get(), flags) };
+
+ // INVARIANT: Initialize the `XArray` with a valid `xa`.
+ Ok(XArray {
+ xa,
+ _p: PhantomData,
+ })
+ }
+
+ /// Replaces an entry with a new value, returning the old value (if any).
+ pub fn replace(&self, index: usize, value: T) -> Result<Option<T>> {
+ let new = value.into_foreign();
+ // If the store below fails, ownership of `new` reverts to us and this guard drops it.
+ let guard = ScopeGuard::new(|| unsafe {
+ // SAFETY: `new` came from `into_foreign` above and was not consumed by `xa_store`.
+ T::from_foreign(new);
+ });
+
+ // SAFETY: `self.xa` is always valid by the type invariant; on success, `xa_store`
+ // takes ownership of `new`.
+ let old = unsafe {
+ bindings::xa_store(
+ self.xa.get(),
+ index.try_into()?,
+ new as *mut _,
+ bindings::GFP_KERNEL,
+ )
+ };
+
+ // SAFETY: `xa_err` only inspects the returned entry value.
+ let err = unsafe { bindings::xa_err(old) };
+ if err != 0 {
+ Err(Error::from_kernel_errno(err))
+ } else if old.is_null() {
+ guard.dismiss();
+ Ok(None)
+ } else {
+ guard.dismiss();
+ Ok(Some(unsafe { T::from_foreign(old) }))
+ }
+ }
+
+ /// Replaces an entry with a new value, dropping the old value (if any).
+ pub fn set(&self, index: usize, value: T) -> Result {
+ self.replace(index, value)?;
+ Ok(())
+ }
+
+ /// Looks up and returns a reference to an entry in the array, returning a `Guard` if it
+ /// exists.
+ ///
+ /// This guard blocks all other actions on the `XArray`. Callers are expected to drop the
+ /// `Guard` eagerly to avoid blocking other users, such as by taking a clone of the value.
+ pub fn get(&self, index: usize) -> Option<Guard<'_, T>> {
+ // SAFETY: `self.xa` is always valid by the type invariant.
+ let p = unsafe {
+ bindings::xa_lock(self.xa.get());
+ bindings::xa_load(self.xa.get(), index.try_into().ok()?)
+ };
+
+ if p.is_null() {
+ // SAFETY: `self.xa` is always valid by the type invariant, and we hold the lock.
+ unsafe { bindings::xa_unlock(self.xa.get()) };
+ None
+ } else {
+ Some(Guard(p as _, &self.xa))
+ }
+ }
+
+ /// Removes an entry, returning it if it existed.
+ pub fn remove(&self, index: usize) -> Option<T> {
+ // SAFETY: `self.xa` is always valid by the type invariant.
+ let p = unsafe { bindings::xa_erase(self.xa.get(), index.try_into().ok()?) };
+ if p.is_null() {
+ None
+ } else {
+ Some(unsafe { T::from_foreign(p) })
+ }
+ }
+
+ /// Allocates a new index in the array, optionally storing a new value into it, with
+ /// configurable bounds for the index range to allocate from.
+ ///
+ /// If `value` is `None`, then the index is reserved from further allocation but remains
+ /// free for storing a value into it.
+ pub fn alloc_limits(&self, value: Option<T>, min: u32, max: u32) -> Result<usize> {
+ let new = value.map_or(core::ptr::null(), |a| a.into_foreign());
+ let mut id: u32 = 0;
+
+ // SAFETY: `self.xa` is always valid by the type invariant. If this succeeds, it
+ // takes ownership of the passed `T` (if any). If it fails, we must drop the
+ // `T` again.
+ let ret = unsafe {
+ bindings::xa_alloc(
+ self.xa.get(),
+ &mut id,
+ new as *mut _,
+ bindings::xa_limit { min, max },
+ bindings::GFP_KERNEL,
+ )
+ };
+
+ if ret < 0 {
+ // Make sure to drop the value we failed to store
+ if !new.is_null() {
+ // SAFETY: If `new` is not NULL, it came from the `ForeignOwnable` we got
+ // from the caller.
+ unsafe { T::from_foreign(new) };
+ }
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(id as usize)
+ }
+ }
+
+ /// Allocates a new index in the array, optionally storing a new value into it.
+ ///
+ /// If `value` is `None`, then the index is reserved from further allocation but remains
+ /// free for storing a value into it.
+ pub fn alloc(&self, value: Option<T>) -> Result<usize> {
+ self.alloc_limits(value, 0, u32::MAX)
+ }
+
+ /// Reserves a new index in the array within configurable bounds for the index.
+ ///
+ /// Returns a `Reservation` object, which can then be used to store a value at this index or
+ /// otherwise free it for reuse.
+ pub fn reserve_limits(&self, min: u32, max: u32) -> Result<Reservation<'_, T>> {
+ Ok(Reservation(
+ self,
+ self.alloc_limits(None, min, max)?,
+ PhantomData,
+ ))
+ }
+
+ /// Reserves a new index in the array.
+ ///
+ /// Returns a `Reservation` object, which can then be used to store a value at this index or
+ /// otherwise free it for reuse.
+ pub fn reserve(&self) -> Result<Reservation<'_, T>> {
+ Ok(Reservation(self, self.alloc(None)?, PhantomData))
+ }
+}
+
+impl<T: ForeignOwnable> Drop for XArray<T> {
+ fn drop(&mut self) {
+ // SAFETY: `self.xa` is valid by the type invariant, and as we have the only reference to
+ // the `XArray` we can safely iterate its contents and drop everything.
+ unsafe {
+ let mut index: core::ffi::c_ulong = 0;
+ let mut entry = bindings::xa_find(
+ self.xa.get(),
+ &mut index,
+ core::ffi::c_ulong::MAX,
+ bindings::BINDINGS_XA_PRESENT,
+ );
+ while !entry.is_null() {
+ T::from_foreign(entry);
+ entry = bindings::xa_find_after(
+ self.xa.get(),
+ &mut index,
+ core::ffi::c_ulong::MAX,
+ bindings::BINDINGS_XA_PRESENT,
+ );
+ }
+
+ bindings::xa_destroy(self.xa.get());
+ }
+ }
+}
+
+// SAFETY: XArray is thread-safe and all mutation operations are internally locked.
+unsafe impl<T: Send + ForeignOwnable> Send for XArray<T> {}
+unsafe impl<T: Sync + ForeignOwnable> Sync for XArray<T> {}
diff --git a/rust/macros/concat_idents.rs b/rust/macros/concat_idents.rs
index 7e4b450f3a50..d6614b900aa2 100644
--- a/rust/macros/concat_idents.rs
+++ b/rust/macros/concat_idents.rs
@@ -14,10 +14,28 @@ fn expect_ident(it: &mut token_stream::IntoIter) -> Ident {
pub(crate) fn concat_idents(ts: TokenStream) -> TokenStream {
let mut it = ts.into_iter();
- let a = expect_ident(&mut it);
- assert_eq!(expect_punct(&mut it), ',');
+ let mut out = TokenStream::new();
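+ // Accept an optional path prefix before the first identifier: for example,
+ // `concat_idents!(foo::bar, baz)` passes `foo::` through unchanged and concatenates
+ // only the final segment, yielding `foo::barbaz`.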
+ let a = loop {
+ let ident = expect_ident(&mut it);
+ let punct = expect_punct(&mut it);
+ match punct.as_char() {
+ ',' => break ident,
+ ':' => {
+ let punct2 = expect_punct(&mut it);
+ assert_eq!(punct2.as_char(), ':');
+ out.extend([
+ TokenTree::Ident(ident),
+ TokenTree::Punct(punct),
+ TokenTree::Punct(punct2),
+ ]);
+ }
+ _ => panic!("Expected , or ::"),
+ }
+ };
+
let b = expect_ident(&mut it);
assert!(it.next().is_none(), "only two idents can be concatenated");
let res = Ident::new(&format!("{a}{b}"), b.span());
- TokenStream::from_iter([TokenTree::Ident(res)])
+ out.extend([TokenTree::Ident(res)]);
+ out
}
diff --git a/rust/macros/helpers.rs b/rust/macros/helpers.rs
index cf7ad950dc1e..517f0bbfcf79 100644
--- a/rust/macros/helpers.rs
+++ b/rust/macros/helpers.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use proc_macro::{token_stream, TokenTree};
+use proc_macro::{token_stream, Group, Punct, TokenTree};
pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
if let Some(TokenTree::Ident(ident)) = it.next() {
@@ -38,9 +38,9 @@ pub(crate) fn expect_ident(it: &mut token_stream::IntoIter) -> String {
try_ident(it).expect("Expected Ident")
}
-pub(crate) fn expect_punct(it: &mut token_stream::IntoIter) -> char {
+pub(crate) fn expect_punct(it: &mut token_stream::IntoIter) -> Punct {
if let TokenTree::Punct(punct) = it.next().expect("Reached end of token stream for Punct") {
- punct.as_char()
+ punct
} else {
panic!("Expected Punct");
}
@@ -56,8 +56,36 @@ pub(crate) fn expect_string_ascii(it: &mut token_stream::IntoIter) -> String {
string
}
+pub(crate) fn expect_literal(it: &mut token_stream::IntoIter) -> String {
+ try_literal(it).expect("Expected Literal")
+}
+
+pub(crate) fn expect_group(it: &mut token_stream::IntoIter) -> Group {
+ if let TokenTree::Group(group) = it.next().expect("Reached end of token stream for Group") {
+ group
+ } else {
+ panic!("Expected Group");
+ }
+}
+
pub(crate) fn expect_end(it: &mut token_stream::IntoIter) {
if it.next().is_some() {
panic!("Expected end");
}
}
+
+pub(crate) fn get_literal(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+ assert_eq!(expect_ident(it), expected_name);
+ assert_eq!(expect_punct(it), ':');
+ let literal = expect_literal(it);
+ assert_eq!(expect_punct(it), ',');
+ literal
+}
+
+pub(crate) fn get_string(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+ assert_eq!(expect_ident(it), expected_name);
+ assert_eq!(expect_punct(it), ':');
+ let string = expect_string(it);
+ assert_eq!(expect_punct(it), ',');
+ string
+}
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index c1d385e345b9..3ab9bae4ab52 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -5,6 +5,7 @@
mod concat_idents;
mod helpers;
mod module;
+mod versions;
mod vtable;
use proc_macro::TokenStream;
@@ -73,6 +74,12 @@ pub fn module(ts: TokenStream) -> TokenStream {
module::module(ts)
}
+/// Declares multiple variants of a structure or impl code
+#[proc_macro_attribute]
+pub fn versions(attr: TokenStream, item: TokenStream) -> TokenStream {
+ versions::versions(attr, item)
+}
+
/// Declares or implements a vtable trait.
///
/// Linux's use of pure vtables is very close to Rust traits, but they differ
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index a7e363c2b044..1f30435d7ad4 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -1,9 +1,59 @@
// SPDX-License-Identifier: GPL-2.0
use crate::helpers::*;
-use proc_macro::{token_stream, Literal, TokenStream, TokenTree};
+use proc_macro::{token_stream, Delimiter, Group, Literal, TokenStream, TokenTree};
use std::fmt::Write;
+#[derive(Clone, PartialEq)]
+enum ParamType {
+ Ident(String),
+ Array { vals: String, max_length: usize },
+}
+
+fn expect_array_fields(it: &mut token_stream::IntoIter) -> ParamType {
+ assert_eq!(expect_punct(it), '<');
+ let vals = expect_ident(it);
+ assert_eq!(expect_punct(it), ',');
+ let max_length_str = expect_literal(it);
+ let max_length = max_length_str
+ .parse::<usize>()
+ .expect("Expected usize length");
+ assert_eq!(expect_punct(it), '>');
+ ParamType::Array { vals, max_length }
+}
+
+fn expect_type(it: &mut token_stream::IntoIter) -> ParamType {
+ if let TokenTree::Ident(ident) = it
+ .next()
+ .expect("Reached end of token stream for param type")
+ {
+ match ident.to_string().as_ref() {
+ "ArrayParam" => expect_array_fields(it),
+ _ => ParamType::Ident(ident.to_string()),
+ }
+ } else {
+ panic!("Expected Param Type")
+ }
+}
+
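+/// Parses a bracketed, comma-separated list of ASCII string literals, e.g. `["foo", "bar"]`
+/// (a trailing comma is accepted), returning the decoded strings.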
+fn expect_string_array(it: &mut token_stream::IntoIter) -> Vec<String> {
+ let group = expect_group(it);
+ assert_eq!(group.delimiter(), Delimiter::Bracket);
+ let mut values = Vec::new();
+ let mut it = group.stream().into_iter();
+
+ while let Some(val) = try_string(&mut it) {
+ assert!(val.is_ascii(), "Expected ASCII string");
+ values.push(val);
+ match it.next() {
+ Some(TokenTree::Punct(punct)) => assert_eq!(punct.as_char(), ','),
+ None => break,
+ _ => panic!("Expected ',' or end of array"),
+ }
+ }
+ values
+}
+
struct ModInfoBuilder<'a> {
module: &'a str,
counter: usize,
@@ -69,6 +119,113 @@ impl<'a> ModInfoBuilder<'a> {
self.emit_only_builtin(field, content);
self.emit_only_loadable(field, content);
}
+
+ fn emit_param(&mut self, field: &str, param: &str, content: &str) {
+ let content = format!("{param}:{content}", param = param, content = content);
+ self.emit(field, &content);
+ }
+}
+
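+/// Returns `true` if the given permissions literal denies all write access, i.e. none of the
+/// `0o222` bits are set. For example, `"0o444"` and `"0"` are read-only, while `"0o644"` is
+/// not; strings that fail to parse conservatively return `false`.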
+fn permissions_are_readonly(perms: &str) -> bool {
+ let (radix, digits) = if let Some(n) = perms.strip_prefix("0x") {
+ (16, n)
+ } else if let Some(n) = perms.strip_prefix("0o") {
+ (8, n)
+ } else if let Some(n) = perms.strip_prefix("0b") {
+ (2, n)
+ } else {
+ (10, perms)
+ };
+ match u32::from_str_radix(digits, radix) {
+ Ok(perms) => perms & 0o222 == 0,
+ Err(_) => false,
+ }
+}
+
+fn param_ops_path(param_type: &str) -> &'static str {
+ match param_type {
+ "bool" => "kernel::module_param::PARAM_OPS_BOOL",
+ "i8" => "kernel::module_param::PARAM_OPS_I8",
+ "u8" => "kernel::module_param::PARAM_OPS_U8",
+ "i16" => "kernel::module_param::PARAM_OPS_I16",
+ "u16" => "kernel::module_param::PARAM_OPS_U16",
+ "i32" => "kernel::module_param::PARAM_OPS_I32",
+ "u32" => "kernel::module_param::PARAM_OPS_U32",
+ "i64" => "kernel::module_param::PARAM_OPS_I64",
+ "u64" => "kernel::module_param::PARAM_OPS_U64",
+ "isize" => "kernel::module_param::PARAM_OPS_ISIZE",
+ "usize" => "kernel::module_param::PARAM_OPS_USIZE",
+ "str" => "kernel::module_param::PARAM_OPS_STR",
+ t => panic!("Unrecognized type {}", t),
+ }
+}
+
+#[allow(clippy::type_complexity)]
+fn try_simple_param_val(
+ param_type: &str,
+) -> Box<dyn Fn(&mut token_stream::IntoIter) -> Option<String>> {
+ match param_type {
+ "bool" => Box::new(try_ident),
+ "str" => Box::new(|param_it| {
+ try_string(param_it)
+ .map(|s| format!("kernel::module_param::StringParam::Ref(b\"{}\")", s))
+ }),
+ _ => Box::new(try_literal),
+ }
+}
+
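+/// Parses the `default: <value>,` entry of a parameter description. For array parameters the
+/// default is a bracketed list, e.g. `default: [1, 2, 3],` (hypothetical), which is rendered
+/// as a `kernel::module_param::ArrayParam::create(&[...])` expression.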
+fn get_default(param_type: &ParamType, param_it: &mut token_stream::IntoIter) -> String {
+ let try_param_val = match param_type {
+ ParamType::Ident(ref param_type)
+ | ParamType::Array {
+ vals: ref param_type,
+ max_length: _,
+ } => try_simple_param_val(param_type),
+ };
+ assert_eq!(expect_ident(param_it), "default");
+ assert_eq!(expect_punct(param_it), ':');
+ let default = match param_type {
+ ParamType::Ident(_) => try_param_val(param_it).expect("Expected default param value"),
+ ParamType::Array {
+ vals: _,
+ max_length: _,
+ } => {
+ let group = expect_group(param_it);
+ assert_eq!(group.delimiter(), Delimiter::Bracket);
+ let mut default_vals = Vec::new();
+ let mut it = group.stream().into_iter();
+
+ while let Some(default_val) = try_param_val(&mut it) {
+ default_vals.push(default_val);
+ match it.next() {
+ Some(TokenTree::Punct(punct)) => assert_eq!(punct.as_char(), ','),
+ None => break,
+ _ => panic!("Expected ',' or end of array default values"),
+ }
+ }
+
+ let mut default_array = "kernel::module_param::ArrayParam::create(&[".to_string();
+ default_array.push_str(
+ &default_vals
+ .iter()
+ .map(|val| val.to_string())
+ .collect::<Vec<String>>()
+ .join(","),
+ );
+ default_array.push_str("])");
+ default_array
+ }
+ };
+ assert_eq!(expect_punct(param_it), ',');
+ default
+}
+
+fn generated_array_ops_name(vals: &str, max_length: usize) -> String {
+ format!(
+ "__generated_array_ops_{vals}_{max_length}",
+ vals = vals,
+ max_length = max_length
+ )
}
#[derive(Debug, Default)]
@@ -78,15 +235,23 @@ struct ModuleInfo {
name: String,
author: Option<String>,
description: Option<String>,
- alias: Option<String>,
+ alias: Option<Vec<String>>,
+ params: Option<Group>,
}
impl ModuleInfo {
fn parse(it: &mut token_stream::IntoIter) -> Self {
let mut info = ModuleInfo::default();
- const EXPECTED_KEYS: &[&str] =
- &["type", "name", "author", "description", "license", "alias"];
+ const EXPECTED_KEYS: &[&str] = &[
+ "type",
+ "name",
+ "author",
+ "description",
+ "license",
+ "alias",
+ "params",
+ ];
const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
let mut seen_keys = Vec::new();
@@ -104,7 +269,7 @@ impl ModuleInfo {
);
}
- assert_eq!(expect_punct(it), ':');
+ assert_eq!(expect_punct(it).as_char(), ':');
match key.as_str() {
"type" => info.type_ = expect_ident(it),
@@ -112,14 +277,15 @@ impl ModuleInfo {
"author" => info.author = Some(expect_string(it)),
"description" => info.description = Some(expect_string(it)),
"license" => info.license = expect_string_ascii(it),
- "alias" => info.alias = Some(expect_string_ascii(it)),
+ "alias" => info.alias = Some(expect_string_array(it)),
+ "params" => info.params = Some(expect_group(it)),
_ => panic!(
"Unknown key \"{}\". Valid keys are: {:?}.",
key, EXPECTED_KEYS
),
}
- assert_eq!(expect_punct(it), ',');
+ assert_eq!(expect_punct(it).as_char(), ',');
seen_keys.push(key);
}
@@ -163,8 +329,10 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
modinfo.emit("description", &description);
}
modinfo.emit("license", &info.license);
- if let Some(alias) = info.alias {
- modinfo.emit("alias", &alias);
+ if let Some(aliases) = info.alias {
+ for alias in aliases {
+ modinfo.emit("alias", &alias);
+ }
}
// Built-in modules also export the `file` modinfo string.
@@ -172,6 +340,195 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
std::env::var("RUST_MODFILE").expect("Unable to fetch RUST_MODFILE environmental variable");
modinfo.emit_only_builtin("file", &file);
+ let mut array_types_to_generate = Vec::new();
+ if let Some(params) = info.params {
+ assert_eq!(params.delimiter(), Delimiter::Brace);
+
+ let mut it = params.stream().into_iter();
+
+ loop {
+ let param_name = match it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ Some(_) => panic!("Expected Ident or end"),
+ None => break,
+ };
+
+ assert_eq!(expect_punct(&mut it), ':');
+ let param_type = expect_type(&mut it);
+ let group = expect_group(&mut it);
+ assert_eq!(expect_punct(&mut it), ',');
+
+ assert_eq!(group.delimiter(), Delimiter::Brace);
+
+ let mut param_it = group.stream().into_iter();
+ let param_default = get_default(&param_type, &mut param_it);
+ let param_permissions = get_literal(&mut param_it, "permissions");
+ let param_description = get_string(&mut param_it, "description");
+ expect_end(&mut param_it);
+
+ // TODO: More primitive types.
+ // TODO: Other kinds: unsafes, etc.
+ let (param_kernel_type, ops): (String, _) = match param_type {
+ ParamType::Ident(ref param_type) => (
+ param_type.to_string(),
+ param_ops_path(param_type).to_string(),
+ ),
+ ParamType::Array {
+ ref vals,
+ max_length,
+ } => {
+ array_types_to_generate.push((vals.clone(), max_length));
+ (
+ format!("__rust_array_param_{}_{}", vals, max_length),
+ generated_array_ops_name(vals, max_length),
+ )
+ }
+ };
+
+ modinfo.emit_param("parmtype", &param_name, &param_kernel_type);
+ modinfo.emit_param("parm", &param_name, &param_description);
+ let param_type_internal = match param_type {
+ ParamType::Ident(ref param_type) => match param_type.as_ref() {
+ "str" => "kernel::module_param::StringParam".to_string(),
+ other => other.to_string(),
+ },
+ ParamType::Array {
+ ref vals,
+ max_length,
+ } => format!(
+ "kernel::module_param::ArrayParam<{vals}, {max_length}>",
+ vals = vals,
+ max_length = max_length
+ ),
+ };
+ let read_func = if permissions_are_readonly(&param_permissions) {
+ format!(
+ "
+ fn read(&self)
+ -> &<{param_type_internal} as kernel::module_param::ModuleParam>::Value {{
+ // SAFETY: Parameters do not need to be locked because they are
+ // read only or sysfs is not enabled.
+ unsafe {{
+ <{param_type_internal} as kernel::module_param::ModuleParam>::value(
+ &__{name}_{param_name}_value
+ )
+ }}
+ }}
+ ",
+ name = info.name,
+ param_name = param_name,
+ param_type_internal = param_type_internal,
+ )
+ } else {
+ format!(
+ "
+ fn read<'lck>(&self, lock: &'lck kernel::KParamGuard)
+ -> &'lck <{param_type_internal} as kernel::module_param::ModuleParam>::Value {{
+ // SAFETY: Parameters are locked by `KParamGuard`.
+ unsafe {{
+ <{param_type_internal} as kernel::module_param::ModuleParam>::value(
+ &__{name}_{param_name}_value
+ )
+ }}
+ }}
+ ",
+ name = info.name,
+ param_name = param_name,
+ param_type_internal = param_type_internal,
+ )
+ };
+ let kparam = format!(
+ "
+ kernel::bindings::kernel_param__bindgen_ty_1 {{
+ arg: unsafe {{ &__{name}_{param_name}_value }}
+ as *const _ as *mut core::ffi::c_void,
+ }},
+ ",
+ name = info.name,
+ param_name = param_name,
+ );
+ write!(
+ modinfo.buffer,
+ "
+ static mut __{name}_{param_name}_value: {param_type_internal} = {param_default};
+
+ struct __{name}_{param_name};
+
+ impl __{name}_{param_name} {{ {read_func} }}
+
+ const {param_name}: __{name}_{param_name} = __{name}_{param_name};
+
+ // Note: the C macro that generates the static structs for the `__param` section
+ // asks for them to be `aligned(sizeof(void *))`. However, that was put in place
+ // in 2003 in commit 38d5b085d2a0 (\"[PATCH] Fix over-alignment problem on x86-64\")
+ // to undo GCC over-alignment of static structs of >32 bytes. It seems that is
+ // not the case anymore, so we simplify to a transparent representation here
+ // in the expectation that it is not needed anymore.
+ // TODO: Revisit this to confirm the above comment and remove it if it happened.
+ #[repr(transparent)]
+ struct __{name}_{param_name}_RacyKernelParam(kernel::bindings::kernel_param);
+
+ unsafe impl Sync for __{name}_{param_name}_RacyKernelParam {{
+ }}
+
+ #[cfg(not(MODULE))]
+ const __{name}_{param_name}_name: *const core::ffi::c_char =
+ b\"{name}.{param_name}\\0\" as *const _ as *const core::ffi::c_char;
+
+ #[cfg(MODULE)]
+ const __{name}_{param_name}_name: *const core::ffi::c_char =
+ b\"{param_name}\\0\" as *const _ as *const core::ffi::c_char;
+
+ #[link_section = \"__param\"]
+ #[used]
+ static __{name}_{param_name}_struct: __{name}_{param_name}_RacyKernelParam =
+ __{name}_{param_name}_RacyKernelParam(kernel::bindings::kernel_param {{
+ name: __{name}_{param_name}_name,
+ // SAFETY: `__this_module` is constructed by the kernel at load time
+ // and will not be freed until the module is unloaded.
+ #[cfg(MODULE)]
+ mod_: unsafe {{ &kernel::bindings::__this_module as *const _ as *mut _ }},
+ #[cfg(not(MODULE))]
+ mod_: core::ptr::null_mut(),
+ ops: unsafe {{ &{ops} }} as *const kernel::bindings::kernel_param_ops,
+ perm: {permissions},
+ level: -1,
+ flags: 0,
+ __bindgen_anon_1: {kparam}
+ }});
+ ",
+ name = info.name,
+ param_type_internal = param_type_internal,
+ read_func = read_func,
+ param_default = param_default,
+ param_name = param_name,
+ ops = ops,
+ permissions = param_permissions,
+ kparam = kparam,
+ )
+ .unwrap();
+ }
+ }
+
+ let mut generated_array_types = String::new();
+
+ for (vals, max_length) in array_types_to_generate {
+ let ops_name = generated_array_ops_name(&vals, max_length);
+ write!(
+ generated_array_types,
+ "
+ kernel::make_param_ops!(
+ {ops_name},
+ kernel::module_param::ArrayParam<{vals}, {{ {max_length} }}>
+ );
+ ",
+ ops_name = ops_name,
+ vals = vals,
+ max_length = max_length,
+ )
+ .unwrap();
+ }
+
format!(
"
/// The module name.
@@ -250,7 +607,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
}}
fn __init() -> core::ffi::c_int {{
- match <{type_} as kernel::Module>::init(&THIS_MODULE) {{
+ match <{type_} as kernel::Module>::init(kernel::c_str!(\"{name}\"), &THIS_MODULE) {{
Ok(m) => {{
unsafe {{
__MOD = Some(m);
@@ -271,10 +628,13 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
}}
{modinfo}
+
+ {generated_array_types}
",
type_ = info.type_,
name = info.name,
modinfo = modinfo.buffer,
+ generated_array_types = generated_array_types,
initcall_section = ".initcall6.init"
)
.parse()
diff --git a/rust/macros/versions.rs b/rust/macros/versions.rs
new file mode 100644
index 000000000000..6dfdfd7d25c5
--- /dev/null
+++ b/rust/macros/versions.rs
@@ -0,0 +1,289 @@
+use proc_macro::{Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree};
+
+// A local `expect_punct` is used instead of `crate::helpers::expect_punct`:
+// this variant accepts any `impl Iterator` and returns the punctuation as a
+// `String`.
+
+fn expect_group(it: &mut impl Iterator<Item = TokenTree>) -> Group {
+ if let Some(TokenTree::Group(group)) = it.next() {
+ group
+ } else {
+ panic!("Expected Group")
+ }
+}
+
+fn expect_punct(it: &mut impl Iterator<Item = TokenTree>) -> String {
+ if let Some(TokenTree::Punct(punct)) = it.next() {
+ punct.to_string()
+ } else {
+ panic!("Expected Group")
+ }
+}
+
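+/// Consumes tokens until one of the characters in `delimiter` is found at the
+/// top nesting level, treating `<`/`>` as nesting so that generic arguments
+/// such as `Foo<A, B>` are not split at their inner commas.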
+fn drop_until_punct(it: &mut impl Iterator<Item = TokenTree>, delimiter: &str) {
+ let mut depth: isize = 0;
+ for token in it.by_ref() {
+ if let TokenTree::Punct(punct) = token {
+ match punct.as_char() {
+ '<' => {
+ depth += 1;
+ }
+ '>' => {
+ depth -= 1;
+ }
+ _ => {
+ if depth == 0 && delimiter.contains(&punct.to_string()) {
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
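+/// A set of versioned fields: `fields` names each version component, `enums`
+/// lists that component's possible values in ascending order (their list
+/// position is what comparison operators act on), and `versions` enumerates
+/// the combinations that get compiled.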
+struct VersionConfig {
+ fields: &'static [&'static str],
+ enums: &'static [&'static [&'static str]],
+ versions: &'static [&'static [&'static str]],
+}
+
+static AGX_VERSIONS: VersionConfig = VersionConfig {
+ fields: &["G", "V"],
+ enums: &[&["G13", "G14"], &["V12_3", "V12_4", "V13_0B4", "V13_2"]],
+ versions: &[
+ &["G13", "V12_3"],
+ &["G14", "V12_4"],
+ // &["G13", "V13_0B4"],
+ // &["G14", "V13_0B4"],
+ &["G13", "V13_2"],
+ &["G14", "V13_2"],
+ ],
+};
+
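+/// Evaluates a `#[ver(...)]` predicate against a concrete version (given as
+/// positions into `config.enums`). For example, with `AGX_VERSIONS` and
+/// `ver` representing `[G14, V13_2]`, the predicate `V >= V13_2 && G == G14`
+/// evaluates to `true`.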
+fn check_version(
+ config: &VersionConfig,
+ ver: &[usize],
+ it: &mut impl Iterator<Item = TokenTree>,
+) -> bool {
+ let first = it.next().unwrap();
+ let val: bool = match &first {
+ TokenTree::Group(group) => check_version(config, ver, &mut group.stream().into_iter()),
+ TokenTree::Ident(ident) => {
+ let key = config
+ .fields
+ .iter()
+ .position(|&r| r == ident.to_string())
+ .unwrap_or_else(|| panic!("Unknown field {}", ident));
+ let mut operator = expect_punct(it);
+ let mut rhs_token = it.next().unwrap();
+ if let TokenTree::Punct(punct) = &rhs_token {
+ operator.extend(std::iter::once(punct.as_char()));
+ rhs_token = it.next().unwrap();
+ }
+ let rhs_name = if let TokenTree::Ident(ident) = &rhs_token {
+ ident.to_string()
+ } else {
+ panic!("Unexpected token {}", ident)
+ };
+
+ let rhs = config.enums[key]
+ .iter()
+ .position(|&r| r == rhs_name)
+ .unwrap_or_else(|| panic!("Unknown value for {}:{}", ident, rhs_name));
+ let lhs = ver[key];
+
+ match operator.as_str() {
+ "==" => lhs == rhs,
+ "!=" => lhs != rhs,
+ ">" => lhs > rhs,
+ ">=" => lhs >= rhs,
+ "<" => lhs < rhs,
+ "<=" => lhs <= rhs,
+ _ => panic!("Unknown operator {}", operator),
+ }
+ }
+ _ => {
+ panic!("Unknown token {}", first)
+ }
+ };
+
+ let boolop = it.next();
+ match boolop {
+ Some(TokenTree::Punct(punct)) => {
+ let right = expect_punct(it);
+ if right != punct.to_string() {
+ panic!("Unexpected op {}{}", punct, right);
+ }
+ match punct.as_char() {
+ '&' => val && check_version(config, ver, it),
+ '|' => val || check_version(config, ver, it),
+ _ => panic!("Unexpected op {}{}", right, right),
+ }
+ }
+ Some(a) => panic!("Unexpected op {}", a),
+ None => val,
+ }
+}
+
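+/// Rewrites a token tree for one concrete version: tokens guarded by
+/// `#[ver(...)]` are kept or dropped according to `check_version`, and
+/// `Name::ver` is rewritten to the tagged identifier (e.g. `Foo::ver`
+/// becomes `FooG13V12_3` for the `["G13", "V12_3"]` version).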
+fn filter_versions(
+ config: &VersionConfig,
+ tag: &str,
+ ver: &[usize],
+ tree: impl IntoIterator<Item = TokenTree>,
+ is_struct: bool,
+) -> Vec<TokenTree> {
+ let mut out = Vec::<TokenTree>::new();
+ let mut it = tree.into_iter();
+
+ while let Some(token) = it.next() {
+ let mut tail: Option<TokenTree> = None;
+ match &token {
+ TokenTree::Punct(punct) if punct.to_string() == "#" => {
+ let group = expect_group(&mut it);
+ let mut grp_it = group.stream().into_iter();
+ let attr = grp_it.next().unwrap();
+ match attr {
+ TokenTree::Ident(ident) if ident.to_string() == "ver" => {
+ // Keep the guarded tokens only if the version predicate matches;
+ // otherwise skip past the guarded field or item.
+ if !check_version(config, ver, &mut grp_it) {
+ if is_struct {
+ drop_until_punct(&mut it, ",");
+ } else {
+ let first = it.next().unwrap();
+ if !matches!(&first, TokenTree::Group(_)) {
+ drop_until_punct(&mut it, ",;");
+ }
+ }
+ }
+ }
+ _ => {
+ out.push(token.clone());
+ out.push(TokenTree::Group(group.clone()));
+ }
+ }
+ continue;
+ }
+ TokenTree::Punct(punct) if punct.to_string() == ":" => {
+ let next = it.next();
+ match next {
+ Some(TokenTree::Punct(punct)) if punct.to_string() == ":" => {
+ let next = it.next();
+ match next {
+ Some(TokenTree::Ident(idtag)) if idtag.to_string() == "ver" => {
+ let ident = match out.pop() {
+ Some(TokenTree::Ident(ident)) => ident,
+ a => panic!("$ver not following ident: {:?}", a),
+ };
+ let name = ident.to_string() + tag;
+ let new_ident = Ident::new(name.as_str(), ident.span());
+ out.push(TokenTree::Ident(new_ident));
+ continue;
+ }
+ Some(a) => {
+ out.push(token.clone());
+ out.push(token.clone());
+ tail = Some(a);
+ }
+ None => {
+ out.push(token.clone());
+ out.push(token.clone());
+ }
+ }
+ }
+ Some(a) => {
+ out.push(token.clone());
+ tail = Some(a);
+ }
+ None => {
+ out.push(token.clone());
+ continue;
+ }
+ }
+ }
+ _ => {
+ tail = Some(token);
+ }
+ }
+ match &tail {
+ Some(TokenTree::Group(group)) => {
+ let new_body =
+ filter_versions(config, tag, ver, &mut group.stream().into_iter(), is_struct);
+ let mut stream = TokenStream::new();
+ stream.extend(new_body);
+ let mut filtered_group = Group::new(group.delimiter(), stream);
+ filtered_group.set_span(group.span());
+ out.push(TokenTree::Group(filtered_group));
+ }
+ Some(token) => {
+ out.push(token.clone());
+ }
+ None => {}
+ }
+ }
+
+ out
+}
+
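+/// Proc-macro entry point: expands the annotated item once per version in the
+/// selected config. For `struct`/`enum`/`union`/`const`/`type` items the
+/// `::ver` marker is inserted automatically after the name; `impl` and `fn`
+/// items must write `Name::ver` themselves where versioning is wanted.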
+pub(crate) fn versions(attr: TokenStream, item: TokenStream) -> TokenStream {
+ let config = match attr.to_string().as_str() {
+ "AGX" => &AGX_VERSIONS,
+ _ => panic!("Unknown version group {}", attr),
+ };
+
+ let mut it = item.into_iter();
+ let mut out = TokenStream::new();
+ let mut body: Vec<TokenTree> = Vec::new();
+ let mut is_struct = false;
+
+ while let Some(token) = it.next() {
+ match token {
+ TokenTree::Punct(punct) if punct.to_string() == "#" => {
+ body.push(TokenTree::Punct(punct));
+ body.push(it.next().unwrap());
+ }
+ TokenTree::Ident(ident)
+ if ["struct", "enum", "union", "const", "type"]
+ .contains(&ident.to_string().as_str()) =>
+ {
+ is_struct = ident.to_string() != "const";
+ body.push(TokenTree::Ident(ident));
+ body.push(it.next().unwrap());
+ // `Name::ver` is not valid syntax in a definition, so splice the `::ver`
+ // marker in for the user here; `filter_versions` later rewrites it into
+ // the concrete version tag.
+ body.push(TokenTree::Punct(Punct::new(':', Spacing::Joint)));
+ body.push(TokenTree::Punct(Punct::new(':', Spacing::Alone)));
+ body.push(TokenTree::Ident(Ident::new("ver", Span::call_site())));
+ break;
+ }
+ TokenTree::Ident(ident) if ident.to_string() == "impl" => {
+ body.push(TokenTree::Ident(ident));
+ break;
+ }
+ TokenTree::Ident(ident) if ident.to_string() == "fn" => {
+ body.push(TokenTree::Ident(ident));
+ break;
+ }
+ _ => {
+ body.push(token);
+ }
+ }
+ }
+
+ body.extend(it);
+
+ for ver in config.versions {
+ let tag = ver.join("");
+ let mut ver_num = Vec::<usize>::new();
+ for (i, comp) in ver.iter().enumerate() {
+ let idx = config.enums[i].iter().position(|&r| r == *comp).unwrap();
+ ver_num.push(idx);
+ }
+ out.extend(filter_versions(
+ config,
+ &tag,
+ &ver_num,
+ body.clone(),
+ is_struct,
+ ));
+ }
+
+ out
+}