evering_ipc/shm.rs

mod boxed;

use std::alloc::Layout;
use std::cell::RefCell;
use std::fmt;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::num::{NonZero, NonZeroUsize};
use std::os::fd::BorrowedFd;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicBool, Ordering};

use anyhow::Context;
use evering::uring::{Header as UringHeader, RawUring};
use rlsf::Tlsf;

pub use self::boxed::{ShmBox, init_client, init_server};
use crate::Result;

/// [`ShmHeader`] contains the necessary metadata of a shared memory region.
///
/// The memory layout of the entire shared memory is illustrated below:
///
/// ```svgbob
/// .-----------------------------------------------------------------------------.
/// |                   |               |                   |                     |
/// | [1] uring offsets | [2] allocator | [3] uring buffers | [4] free memory ... |
/// | ^                 |               |                   |                   ^ |
/// '-|-------------------------------------------------------------------------|-'
///   '-- start of the shared memory (page aligned)                             |
///                                                  end of the shared memory --'
/// ```
///
/// 1. Uring offsets are used to build [`RawUring`].
/// 2. Each shared memory region comes with one single-threaded allocator.
///    Typically, it will be taken by the client after initialization.
/// 3. The submitted requests and responses are stored in these buffers.
/// 4. The rest of the shared memory is managed by the allocator. [`ShmBox`]
///    provides APIs similar to [`Box`], but it is allocated and deallocated by
///    the shared memory [`Allocator`] instead of the global allocator.
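///
/// # Examples
///
/// A minimal sketch of the intended server/client handshake. The `fd`,
/// `SHM_SIZE`, and `uring_header` values are placeholders assumed to be
/// prepared elsewhere (e.g. a `memfd` passed over a Unix socket):
///
/// ```ignore
/// // Server side: size and initialize the region.
/// let shm = unsafe { ShmHeader::<Sqe, Rqe>::create(fd, SHM_SIZE, uring_header)? };
/// let uring = unsafe { shm.as_ref() }.build_raw_uring();
///
/// // Client side: map the same fd, then take the allocator.
/// let shm = unsafe { ShmHeader::<Sqe, Rqe>::open(fd, SHM_SIZE)? };
/// let alloc = unsafe { shm.as_ref() }.get_allocator();
/// ```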
pub struct ShmHeader<A = crate::op::Sqe, B = crate::op::Rqe, Ext = ()> {
    header: UringHeader<Ext>,
    // Relative offsets of the uring buffers
    buf_a: usize,
    buf_b: usize,
    allocator_taken: AtomicBool,
    allocator: Allocator, // Max block size: 32 << 24 = 512 MiB
    // (start, end) offsets of the region handed to the allocator
    free_memory: (usize, usize),
    marker: PhantomData<(A, B)>,
}

impl<A, B, Ext> ShmHeader<A, B, Ext> {
    /// # Safety
    ///
    /// The given `fd` must be valid for the remaining lifetime of the running
    /// program.
    pub unsafe fn create(
        fd: BorrowedFd,
        size: usize,
        header: UringHeader<Ext>,
    ) -> Result<NonNull<Self>> {
        // Calculate offsets
        let mut cur = size_of::<Self>();

        let layout_a = Layout::array::<A>(header.size_a()).unwrap();
        let buf_a = align_up(cur, layout_a.align());
        cur = buf_a + layout_a.size();

        let layout_b = Layout::array::<B>(header.size_b()).unwrap();
        let buf_b = align_up(cur, layout_b.align());
        cur = buf_b + layout_b.size();

        assert!(cur < size, "capacity of shared memory is too small");

        // Initialize shared memory and the uring buffers
        unsafe {
            shm_grow(fd, size)?;
            let this = shm_mmap(fd, size, 0)?.cast::<Self>();

            this.write(Self {
                header,
                buf_a,
                buf_b,
                allocator_taken: AtomicBool::new(false),
                allocator: Allocator::new(),
                free_memory: (cur, size),
                marker: PhantomData,
            });

            Ok(this)
        }
    }

    /// # Safety
    ///
    /// The given `fd` must be valid for the remaining lifetime of the running
    /// program.
    pub unsafe fn open(fd: BorrowedFd, size: usize) -> Result<NonNull<Self>> {
        assert_eq!(
            nix::sys::stat::fstat(fd)
                .context("failed to read shmfd")?
                .st_size as i64,
            size as i64,
            "shared memory size mismatch",
        );
        unsafe { shm_mmap(fd, size, 0).map(NonNull::cast) }
    }

    /// # Safety
    ///
    /// The supplied `ptr` and `size` must match the previous `mmap` call.
    pub unsafe fn close(ptr: NonNull<Self>, size: usize) -> Result<()> {
        unsafe {
            nix::sys::mman::munmap(ptr.cast(), size).context("failed to munmap shared memory")
        }
    }

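    /// Builds a [`RawUring`] whose header and buffer pointers point into this
    /// shared memory region. The result must not outlive the mapping.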
    pub fn build_raw_uring(&self) -> RawUring<A, B, Ext> {
        let mut raw = RawUring::<A, B, Ext>::dangling();
        unsafe {
            let start = self.start_ptr();
            raw.header = NonNull::from(&self.header);
            raw.buf_a = start.byte_add(self.buf_a).cast();
            raw.buf_b = start.byte_add(self.buf_b).cast();
        }
        raw
    }

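    /// Takes the allocator of this region, registering the free memory (part
    /// [4] of the layout above) as its initial pool.
    ///
    /// # Panics
    ///
    /// Panics if the allocator has already been taken; each region's
    /// allocator is single-threaded and may only be claimed once.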
    pub fn get_allocator(&self) -> &Allocator {
        if self.allocator_taken.swap(true, Ordering::Acquire) {
            panic!("allocator has been taken");
        }
        // SAFETY: `free_memory` was computed in `create` to lie within this
        // mapping, and the `allocator_taken` flag ensures the block is only
        // registered once.
        unsafe {
            let (data_start, data_end) = self.free_memory;
            let data = self.start_ptr().byte_add(data_start);
            let block = NonNull::slice_from_raw_parts(data, data_end - data_start);

            tracing::info!(
                "added free memory, addr={data:#x?}, size={}",
                bytesize::ByteSize(block.len() as u64).display().iec_short()
            );
            self.allocator
                .tlsf
                .borrow_mut()
                .append_free_block_ptr(block);
        }
        &self.allocator
    }

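    /// Converts a pointer into this region into a position-independent
    /// [`ShmToken`] holding the pointer's offset from the region's base, so
    /// it stays meaningful across processes that map the region at different
    /// addresses.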
    pub fn get_shm<T: ?Sized>(&self, ptr: NonNull<T>) -> ShmToken<T> {
        let start = self.start_addr().get();
        let addr = ptr.addr().get();
        assert!(addr > start);
        let shm = NonZeroUsize::new(addr - start).unwrap();
        ShmToken(ptr.with_addr(shm))
    }

    /// # Safety
    ///
    /// The given `shm` must belong to this memory region; otherwise the
    /// offset arithmetic below goes out of bounds.
    pub unsafe fn get_ptr<T: ?Sized>(&self, shm: ShmToken<T>) -> NonNull<T> {
        let start = self.start_addr().get();
        // SAFETY: guaranteed by the caller.
        unsafe { shm.0.byte_add(start) }
    }

    fn start_addr(&self) -> NonZeroUsize {
        self.start_ptr().addr()
    }

    fn start_ptr(&self) -> NonNull<u8> {
        NonNull::from(self).cast()
    }
}

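/// A single-threaded TLSF allocator backed by the free memory of a shared
/// memory region. Obtain it via [`ShmHeader::get_allocator`]; it cannot be
/// constructed directly.
///
/// # Examples
///
/// A minimal sketch, assuming `alloc` was obtained from
/// [`ShmHeader::get_allocator`]:
///
/// ```ignore
/// let p = alloc.alloc(42u32);
/// let s = alloc.alloc_slice_copied(b"hello");
/// unsafe {
///     assert_eq!(*p.as_ref(), 42);
///     alloc.dealloc(s);
///     alloc.dealloc(p);
/// }
/// ```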
pub struct Allocator {
    /// Max block size is `32 << 24` bytes (512 MiB).
    tlsf: RefCell<Tlsf<'static, u32, u32, 24, 8>>,
}

impl Allocator {
    const fn new() -> Self {
        Self {
            tlsf: RefCell::new(Tlsf::new()),
        }
    }

    pub fn alloc<T>(&self, val: T) -> NonNull<T> {
        unsafe {
            let mut ptr = self.alloc_uninit();
            ptr.as_mut().write(val);
            ptr.cast()
        }
    }

    pub fn alloc_uninit<T>(&self) -> NonNull<MaybeUninit<T>> {
        unsafe { self.alloc_raw(Layout::new::<T>()).cast() }
    }

    pub fn alloc_slice_copied<T: Copy>(&self, src: &[T]) -> NonNull<[T]> {
        unsafe {
            let mut ptr = self.alloc_slice_uninit(src.len());
            let src_uninit = src as *const [T] as *const [MaybeUninit<T>];
            ptr.as_mut().copy_from_slice(&*src_uninit);
            // SAFETY: every element has just been initialized by the copy.
            NonNull::new_unchecked(ptr.as_ptr() as *mut [T])
        }
    }

    pub fn alloc_slice_filled<T: Copy>(&self, val: T, n: usize) -> NonNull<[T]> {
        unsafe {
            let mut ptr = self.alloc_slice_uninit(n);
            ptr.as_mut().fill(MaybeUninit::new(val));
            // SAFETY: every element has just been initialized by the fill.
            NonNull::new_unchecked(ptr.as_ptr() as *mut [T])
        }
    }

    pub fn alloc_slice_uninit<T>(&self, n: usize) -> NonNull<[MaybeUninit<T>]> {
        unsafe {
            let data = self.alloc_raw(Layout::array::<T>(n).unwrap());
            NonNull::slice_from_raw_parts(data.cast(), n)
        }
    }

    /// # Safety
    ///
    /// The given `ptr` must belong to this allocator and point to a live
    /// value, as it is dereferenced to compute the layout.
    pub unsafe fn dealloc<T: ?Sized>(&self, ptr: NonNull<T>) {
        unsafe { self.dealloc_raw(ptr.cast(), Layout::for_value(ptr.as_ref())) }
    }

    unsafe fn alloc_raw(&self, layout: Layout) -> NonNull<u8> {
        assert_ne!(layout.size(), 0);
        self.tlsf
            .borrow_mut()
            .allocate(layout)
            .expect("failed to allocate in shared memory")
    }

    unsafe fn dealloc_raw(&self, ptr: NonNull<u8>, layout: Layout) {
        assert_ne!(layout.size(), 0);
        unsafe { self.tlsf.borrow_mut().deallocate(ptr, layout.align()) }
    }
}

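/// A position-independent reference to a value in shared memory, stored as an
/// offset from the region's base address.
///
/// # Examples
///
/// A round-trip sketch, assuming `header` is a mapped [`ShmHeader`] and
/// `alloc` its allocator:
///
/// ```ignore
/// let ptr = alloc.alloc(7u64);
/// let token = header.get_shm(ptr);             // pointer -> offset
/// let back = unsafe { header.get_ptr(token) }; // offset -> pointer
/// assert_eq!(back, ptr);
/// ```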
pub struct ShmToken<T: ?Sized>(NonNull<T>);

impl<T: ?Sized> ShmToken<T> {
    pub fn as_ptr(&self) -> NonNull<T> {
        // SAFETY: tokens are expected to originate from the process-wide
        // region installed by `ShmHandle`.
        unsafe { boxed::ShmHandle::get().get_ptr(*self) }
    }
}

impl<T: ?Sized> fmt::Debug for ShmToken<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("ShmToken")
            .field(&self.0.cast::<()>())
            .finish()
    }
}

// Manual impls: `derive` would wrongly require `T: Clone`/`T: Copy`.
impl<T: ?Sized> Clone for ShmToken<T> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<T: ?Sized> Copy for ShmToken<T> {}

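/// Maps `len` bytes of `fd` at `offset` as shared, read-write memory.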
unsafe fn shm_mmap(fd: BorrowedFd, len: usize, offset: usize) -> Result<NonNull<u8>> {
    use nix::sys::mman::{MapFlags, ProtFlags};
    unsafe {
        nix::sys::mman::mmap(
            None,
            NonZero::new(len).unwrap(),
            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
            MapFlags::MAP_SHARED,
            fd,
            offset as i64,
        )
        .map(NonNull::cast::<u8>)
        .context("failed to mmap shared memory")
    }
}

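/// Grows the shared memory object behind `fd` to `new_len` bytes.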
fn shm_grow(fd: BorrowedFd, new_len: usize) -> Result<()> {
    nix::unistd::ftruncate(fd, new_len as i64).context("failed to grow shared memory")
}

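/// Rounds `n` up to the next multiple of `align` (a power of two),
/// e.g. `align_up(13, 8) == 16`.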
const fn align_up(n: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (n + align - 1) & !(align - 1)
}