// ngx/core/slab.rs

//! Wrapper for the nginx slab pool allocator.
//!
//! See <https://nginx.org/en/docs/dev/development_guide.html#shared_memory>.
use core::alloc::Layout;
use core::cmp;
use core::ptr::{self, NonNull};

use nginx_sys::{
    ngx_shm_zone_t, ngx_shmtx_lock, ngx_shmtx_unlock, ngx_slab_alloc_locked, ngx_slab_free_locked,
    ngx_slab_pool_t,
};

use crate::allocator::{dangling_for_layout, AllocError, Allocator};

/// Non-owning wrapper for an [`ngx_slab_pool_t`] pointer, providing methods for working with
/// shared memory slab pools.
///
/// See <https://nginx.org/en/docs/dev/development_guide.html#shared_memory>.
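///
/// All [`Allocator`] methods on this type take the pool mutex for the duration of a single
/// operation; use [`SlabPool::lock`] to perform several operations under one lock acquisition.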
#[derive(Clone, Debug)]
pub struct SlabPool(NonNull<ngx_slab_pool_t>);

unsafe impl Send for SlabPool {}
unsafe impl Sync for SlabPool {}

unsafe impl Allocator for SlabPool {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.lock().allocate(layout)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        self.lock().deallocate(ptr, layout)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.lock().allocate_zeroed(layout)
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        self.lock().grow(ptr, old_layout, new_layout)
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        self.lock().grow_zeroed(ptr, old_layout, new_layout)
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        self.lock().shrink(ptr, old_layout, new_layout)
    }
}

impl AsRef<ngx_slab_pool_t> for SlabPool {
    #[inline]
    fn as_ref(&self) -> &ngx_slab_pool_t {
        // SAFETY: this wrapper should be constructed with a valid pointer to ngx_slab_pool_t
        unsafe { self.0.as_ref() }
    }
}

impl AsMut<ngx_slab_pool_t> for SlabPool {
    #[inline]
    fn as_mut(&mut self) -> &mut ngx_slab_pool_t {
        // SAFETY: this wrapper should be constructed with a valid pointer to ngx_slab_pool_t
        unsafe { self.0.as_mut() }
    }
}

impl SlabPool {
    /// Creates a new `SlabPool` from an initialized shared zone.
    ///
    /// # Safety
    ///
    /// The shared zone must be initialized and safe to use. This holds:
    ///  * in the master process, between the zone init callback and a configuration reload;
    ///  * in a worker process, for the whole lifetime of the process.
    ///
    /// After a configuration reload (notably, in the cycle pool cleanup handlers), zone
    /// addresses from the old cycle may become unmapped.
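    ///
    /// # Example
    ///
    /// A minimal sketch of obtaining a pool inside a shared zone init callback, assuming
    /// `shm_zone` is the `*mut ngx_shm_zone_t` passed to the callback (trait imports and the
    /// callback wiring are omitted):
    ///
    /// ```ignore
    /// let pool = unsafe { SlabPool::from_shm_zone(&*shm_zone) }.expect("initialized shm zone");
    /// let layout = core::alloc::Layout::new::<u64>();
    /// let ptr = pool.allocate(layout).expect("slab allocation failed");
    /// // ... use the allocation ...
    /// unsafe { pool.deallocate(ptr.cast(), layout) };
    /// ```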
    pub unsafe fn from_shm_zone(shm_zone: &ngx_shm_zone_t) -> Option<Self> {
        let ptr = NonNull::new(shm_zone.shm.addr)?.cast();
        Some(Self(ptr))
    }

    /// Locks the slab pool mutex.
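    ///
    /// The returned [`LockedSlabPool`] releases the mutex when dropped. Locking manually lets
    /// several allocations share a single lock acquisition, e.g. (a sketch, with `layout_a` and
    /// `layout_b` as placeholder [`Layout`]s):
    ///
    /// ```ignore
    /// let locked = pool.lock();
    /// let a = locked.allocate(layout_a)?;
    /// let b = locked.allocate(layout_b)?;
    /// drop(locked); // unlocks the pool mutex
    /// ```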
    #[inline]
    pub fn lock(&self) -> LockedSlabPool {
        let shpool = self.0.as_ptr();
        unsafe { ngx_shmtx_lock(ptr::addr_of_mut!((*shpool).mutex)) };
        LockedSlabPool(self.0)
    }
}

/// Wrapper for a locked [`ngx_slab_pool_t`] pointer.
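///
/// The pool mutex is released when this wrapper is dropped.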
pub struct LockedSlabPool(NonNull<ngx_slab_pool_t>);

unsafe impl Allocator for LockedSlabPool {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
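        // Zero-sized allocations never touch the slab pool; serve them with a dangling,
        // well-aligned pointer, as the `Allocator` contract permits.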
        if layout.size() == 0 {
            return Ok(NonNull::slice_from_raw_parts(
                dangling_for_layout(&layout),
                layout.size(),
            ));
        }

        // Small slab allocations (size <= ngx_pagesize / 2) are always aligned to the size
        // rounded up to the nearest power of 2.
        // If the requested alignment exceeds the size, we can guarantee the alignment by
        // allocating `align()` bytes.
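        // For example, Layout { size: 24, align: 64 } results in a 64 byte request, which the
        // slab allocator rounds up to the nearest power of two (64) and thus aligns to 64 bytes.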
        let size = cmp::max(layout.size(), layout.align());

        let ptr = unsafe { ngx_slab_alloc_locked(self.0.as_ptr(), size) };
        let ptr = NonNull::new(ptr.cast()).ok_or(AllocError)?;

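        // Defensive check: if the allocation still did not end up with the requested alignment
        // (e.g. an alignment larger than the slab allocator can guarantee for large, page-based
        // allocations), release it and report failure rather than return a misaligned pointer.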
        if ptr.align_offset(layout.align()) != 0 {
            unsafe { self.deallocate(ptr, layout) };
            return Err(AllocError);
        }

        Ok(NonNull::slice_from_raw_parts(ptr, layout.size()))
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
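        // Zero-sized allocations were served with a dangling pointer and never came from the
        // slab pool, so there is nothing to free.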
        if layout.size() != 0 {
            ngx_slab_free_locked(self.0.as_ptr(), ptr.as_ptr().cast())
        }
    }
}

impl Drop for LockedSlabPool {
    fn drop(&mut self) {
        let shpool = unsafe { self.0.as_mut() };
        unsafe { ngx_shmtx_unlock(&mut shpool.mutex) }
    }
}