// SPDX-License-Identifier: GPL-2.0

//! Kernel page allocation and management.
//!
//! TODO: This module is a work in progress.

use crate::{
bindings, error::code::*, io_buffer::IoBufferReader, user_ptr::UserSlicePtrReader, Result,
PAGE_SIZE,
};
use core::{marker::PhantomData, ptr};

/// A set of physical pages.
///
/// `Pages` holds a reference to a set of pages of order `ORDER`. Having the order as a generic
/// const allows the struct to have the same size as a pointer.
///
/// # Invariants
///
/// The pointer `Pages::pages` is valid and points to 2^ORDER pages.
pub struct Pages<const ORDER: u32> {
pub(crate) pages: *mut bindings::page,
}

impl<const ORDER: u32> Pages<ORDER> {
/// Allocates a new set of contiguous pages.
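    ///
    /// # Examples
    ///
    /// A sketch of allocating a single zeroed page (`ORDER = 0`, i.e. 2^0 pages); the wrapper
    /// function is illustrative only:
    ///
    /// ```ignore
    /// use kernel::{pages::Pages, Result};
    ///
    /// fn alloc_one_page() -> Result<Pages<0>> {
    ///     // Allocates 2^0 = 1 zeroed page; fails with `ENOMEM` if the allocation fails.
    ///     Pages::<0>::new()
    /// }
    /// ```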
pub fn new() -> Result<Self> {
// TODO: Consider whether we want to allow callers to specify flags.
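        // GFP_KERNEL allows the allocation to sleep, __GFP_ZERO returns zeroed pages, and
        // __GFP_HIGHMEM allows the pages to come from high memory (they are mapped into the
        // kernel address space only on demand, via `kmap`).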
// SAFETY: This only allocates pages. We check that it succeeds in the next statement.
let pages = unsafe {
bindings::alloc_pages(
bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
ORDER,
)
};
if pages.is_null() {
return Err(ENOMEM);
}
// INVARIANTS: We checked that the allocation above succeeded.
Ok(Self { pages })
    }

/// Copies data from the given [`UserSlicePtrReader`] into the pages.
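    ///
    /// # Examples
    ///
    /// A sketch of filling the start of the first page from a userspace reader; the wrapper
    /// function and the fixed length are illustrative only:
    ///
    /// ```ignore
    /// use kernel::{pages::Pages, user_ptr::UserSlicePtrReader, Result};
    ///
    /// fn fill_first_page(pages: &Pages<0>, reader: &mut UserSlicePtrReader) -> Result {
    ///     // Copy 64 bytes from userspace into the page, starting at offset 0.
    ///     pages.copy_into_page(reader, 0, 64)
    /// }
    /// ```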
pub fn copy_into_page(
&self,
reader: &mut UserSlicePtrReader,
offset: usize,
len: usize,
) -> Result {
// TODO: For now this only works on the first page.
let end = offset.checked_add(len).ok_or(EINVAL)?;
if end > PAGE_SIZE {
return Err(EINVAL);
}
let mapping = self.kmap(0).ok_or(EINVAL)?;
        // SAFETY: `mapping` points to the start of a valid mapped page and the check above
        // guarantees that `offset + len` is within that page, so the destination buffer is valid
        // for writes of `len` bytes.
unsafe { reader.read_raw((mapping.ptr as usize + offset) as _, len) }?;
Ok(())
    }

/// Maps the pages and reads from them into the given buffer.
///
/// # Safety
///
/// Callers must ensure that the destination buffer is valid for the given length.
/// Additionally, if the raw buffer is intended to be recast, they must ensure that the data
/// can be safely cast; [`crate::io_buffer::ReadableFromBytes`] has more details about it.
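    ///
    /// # Examples
    ///
    /// A sketch of reading the first bytes of the first page into a local buffer; the wrapper
    /// function and buffer size are illustrative only:
    ///
    /// ```ignore
    /// use kernel::{pages::Pages, Result};
    ///
    /// fn read_header(pages: &Pages<0>) -> Result<[u8; 8]> {
    ///     let mut buf = [0u8; 8];
    ///     // SAFETY: `buf` is a valid destination for writes of `buf.len()` bytes.
    ///     unsafe { pages.read(buf.as_mut_ptr(), 0, buf.len())? };
    ///     Ok(buf)
    /// }
    /// ```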
pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
// TODO: For now this only works on the first page.
let end = offset.checked_add(len).ok_or(EINVAL)?;
if end > PAGE_SIZE {
return Err(EINVAL);
}
let mapping = self.kmap(0).ok_or(EINVAL)?;
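        // SAFETY: The check above guarantees that `offset + len` lies within the mapped page, so
        // the source is valid for reads of `len` bytes; the caller guarantees that `dest` is
        // valid for writes of `len` bytes.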
unsafe { ptr::copy((mapping.ptr as *mut u8).add(offset), dest, len) };
Ok(())
    }

/// Maps the pages and writes into them from the given buffer.
///
/// # Safety
///
/// Callers must ensure that the buffer is valid for the given length. Additionally, if the
/// page is (or will be) mapped by userspace, they must ensure that no kernel data is leaked
/// through padding if it was cast from another type; [`crate::io_buffer::WritableToBytes`] has
/// more details about it.
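    ///
    /// # Examples
    ///
    /// A sketch of writing a byte slice to the start of the first page; the wrapper function is
    /// illustrative only:
    ///
    /// ```ignore
    /// use kernel::{pages::Pages, Result};
    ///
    /// fn write_header(pages: &Pages<0>, data: &[u8]) -> Result {
    ///     // SAFETY: `data` is valid for reads of `data.len()` bytes for the duration of the call.
    ///     unsafe { pages.write(data.as_ptr(), 0, data.len()) }
    /// }
    /// ```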
pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
// TODO: For now this only works on the first page.
let end = offset.checked_add(len).ok_or(EINVAL)?;
if end > PAGE_SIZE {
return Err(EINVAL);
}
let mapping = self.kmap(0).ok_or(EINVAL)?;
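        // SAFETY: The caller guarantees that `src` is valid for reads of `len` bytes, and the
        // check above guarantees that `offset + len` lies within the mapped page, so the
        // destination is valid for writes of `len` bytes.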
unsafe { ptr::copy(src, (mapping.ptr as *mut u8).add(offset), len) };
Ok(())
    }

/// Maps the page at index `index`.
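    ///
    /// Returns `None` if `index` is out of range for this allocation or if mapping the page
    /// fails. The returned [`PageMapping`] unmaps the page when it is dropped.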
fn kmap(&self, index: usize) -> Option<PageMapping<'_>> {
if index >= 1usize << ORDER {
return None;
}
// SAFETY: We checked above that `index` is within range.
let page = unsafe { self.pages.add(index) };
// SAFETY: `page` is valid based on the checks above.
let ptr = unsafe { bindings::kmap(page) };
if ptr.is_null() {
return None;
}
Some(PageMapping {
page,
ptr,
_phantom: PhantomData,
})
}
}

impl<const ORDER: u32> Drop for Pages<ORDER> {
fn drop(&mut self) {
// SAFETY: By the type invariants, we know the pages are allocated with the given order.
unsafe { bindings::__free_pages(self.pages, ORDER) };
}
}
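
/// The kernel mapping of a single page, created by [`Pages::kmap`].
///
/// The page is unmapped via `bindings::kunmap` when the mapping is dropped.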
struct PageMapping<'a> {
page: *mut bindings::page,
ptr: *mut core::ffi::c_void,
_phantom: PhantomData<&'a i32>,
}
impl Drop for PageMapping<'_> {
fn drop(&mut self) {
// SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
// page, so it is safe to unmap it here.
unsafe { bindings::kunmap(self.page) };
}
}