//! A pool implementation based on buffer slots

use std::io;
use std::{
    os::unix::io::{AsRawFd, OwnedFd},
    sync::{
        atomic::{AtomicU8, AtomicUsize, Ordering},
        Arc, Mutex, Weak,
    },
};

use wayland_client::{
    protocol::{wl_buffer, wl_shm, wl_surface},
    Proxy,
};

use crate::{globals::ProvidesBoundGlobal, shm::raw::RawPool, shm::CreatePoolError};

#[derive(Debug, thiserror::Error)]
pub enum CreateBufferError {
    /// Slot creation error.
    #[error(transparent)]
    Io(#[from] io::Error),

    /// Pool mismatch.
    #[error("Incorrect pool for slot")]
    PoolMismatch,

    /// Slot size mismatch
    #[error("Requested buffer size is too large for slot")]
    SlotTooSmall,
}

#[derive(Debug, thiserror::Error)]
pub enum ActivateSlotError {
    /// Buffer was already active
    #[error("Buffer was already active")]
    AlreadyActive,
}

/// A pool of shared memory that hands out [Slot]s, each of which can back one or more [Buffer]s.
#[derive(Debug)]
pub struct SlotPool {
    pub(crate) inner: RawPool,
    free_list: Arc<Mutex<Vec<FreelistEntry>>>,
}

#[derive(Debug)]
struct FreelistEntry {
    offset: usize,
    len: usize,
}

/// A chunk of memory allocated from a [SlotPool]
///
/// Retaining this object is only required if you wish to resize or change the buffer's format
/// without changing the contents of the backing memory.
#[derive(Debug)]
pub struct Slot {
    inner: Arc<SlotInner>,
}

#[derive(Debug)]
struct SlotInner {
    free_list: Weak<Mutex<Vec<FreelistEntry>>>,
    offset: usize,
    len: usize,
    active_buffers: AtomicUsize,
    /// Count of all "real" references to this slot. This includes all Slot objects and any
    /// BufferData object that is not in the DEAD state. When this reaches zero, the memory for
    /// this slot will return to the free_list. It is not possible for it to reach zero and have a
    /// Slot or Buffer referring to it.
    all_refs: AtomicUsize,
}

/// A wrapper around a [`wl_buffer::WlBuffer`] which has been allocated via a [SlotPool].
///
/// When this object is dropped, the buffer will be destroyed immediately if it is not active, or
/// upon the server's release if it is.
#[derive(Debug)]
pub struct Buffer {
    buffer: wl_buffer::WlBuffer,
    height: i32,
    stride: i32,
    slot: Slot,
}

/// ObjectData for the WlBuffer
#[derive(Debug)]
struct BufferData {
    inner: Arc<SlotInner>,
    state: AtomicU8,
}

// These constants define the value of BufferData::state, since AtomicEnum does not exist.
impl BufferData {
    /// Buffer is counted in active_buffers list; will return to INACTIVE on Release.
    const ACTIVE: u8 = 0;

    /// Buffer is not counted in active_buffers list, but also has not been destroyed.
    const INACTIVE: u8 = 1;

    /// Buffer is counted in active_buffers list; will move to DEAD on Release
    const DESTROY_ON_RELEASE: u8 = 2;

    /// Buffer has been destroyed
    const DEAD: u8 = 3;

    /// Value that is ORed on buffer release to transition to the next state
    const RELEASE_SET: u8 = 1;

    /// Value that is ORed on buffer destroy to transition to the next state
    const DESTROY_SET: u8 = 2;
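
    // State transition summary, as implemented by the fetch_or/fetch_and calls elsewhere in this
    // file (each transition ORs in one of the SET masks above; activate() instead clears
    // RELEASE_SET to go back from INACTIVE to ACTIVE):
    //
    //   ACTIVE             | RELEASE_SET -> INACTIVE
    //   ACTIVE             | DESTROY_SET -> DESTROY_ON_RELEASE
    //   INACTIVE           | DESTROY_SET -> DEAD
    //   DESTROY_ON_RELEASE | RELEASE_SET -> DEAD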

    /// Call after successfully transitioning the state to DEAD.
    ///
    /// This drops the "real" reference that was taken on behalf of this BufferData in
    /// [SlotPool::create_buffer_in], possibly returning the slot's memory to the free list.
    fn record_death(&self) {
        drop(Slot { inner: self.inner.clone() })
    }
}

impl SlotPool {
    /// Create a new pool with an initial size of `len` bytes.
    pub fn new(
        len: usize,
        shm: &impl ProvidesBoundGlobal<wl_shm::WlShm, 1>,
    ) -> Result<Self, CreatePoolError> {
        let inner = RawPool::new(len, shm)?;
        let free_list = Arc::new(Mutex::new(vec![FreelistEntry { offset: 0, len: inner.len() }]));
        Ok(SlotPool { inner, free_list })
    }

    /// Create a new buffer in a new slot.
    ///
    /// This returns the buffer and the canvas. The parameters are:
    ///
    /// - `width`: the width of this buffer (in pixels)
    /// - `height`: the height of this buffer (in pixels)
    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
    /// - `format`: the encoding format of the pixels. Using a format that was not
    ///   advertised to the `wl_shm` global by the server is a protocol error and will
    ///   terminate your connection.
    ///
    /// The [Slot] for this buffer will have exactly the size required for the data. It can be
    /// accessed via [Buffer::slot] to create additional buffers that point to the same data. This
    /// is required if you wish to change formats, buffer dimensions, or attach a canvas to
    /// multiple surfaces.
    ///
    /// For more control over sizing, use [Self::new_slot] and [Self::create_buffer_in].
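    ///
    /// # Example
    ///
    /// A minimal single-buffer sketch; it assumes `shm` is some value implementing
    /// [ProvidesBoundGlobal] for `wl_shm` and `surface` is an existing
    /// [`wl_surface::WlSurface`], with error handling elided:
    ///
    /// ```ignore
    /// let (width, height, stride) = (256, 256, 256 * 4);
    /// let mut pool = SlotPool::new((stride * height) as usize, &shm)?;
    /// let (buffer, canvas) = pool.create_buffer(width, height, stride, wl_shm::Format::Argb8888)?;
    /// canvas.fill(0xFF); // draw into the slot's memory
    /// buffer.attach_to(&surface)?;
    /// surface.commit();
    /// ```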
    pub fn create_buffer(
        &mut self,
        width: i32,
        height: i32,
        stride: i32,
        format: wl_shm::Format,
    ) -> Result<(Buffer, &mut [u8]), CreateBufferError> {
        let len = (height as usize) * (stride as usize);
        let slot = self.new_slot(len)?;
        let buffer = self.create_buffer_in(&slot, width, height, stride, format)?;
        let canvas = self.raw_data_mut(&slot);
        Ok((buffer, canvas))
    }

    /// Get the bytes corresponding to a given slot or buffer if drawing to the slot is permitted.
    ///
    /// Returns `None` if there are active buffers in the slot or if the slot does not correspond
    /// to this pool.
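    ///
    /// A short sketch, assuming `pool` and `buffer` were created as in [Self::create_buffer]:
    ///
    /// ```ignore
    /// if let Some(canvas) = pool.canvas(&buffer) {
    ///     // The server has released the buffer; it is safe to draw the next frame.
    ///     canvas.fill(0x00);
    /// }
    /// ```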
    pub fn canvas(&mut self, key: &impl CanvasKey) -> Option<&mut [u8]> {
        key.canvas(self)
    }

    /// Returns the size, in bytes, of this pool.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Resizes the memory pool, notifying the server the pool has changed in size.
    ///
    /// This is an optimization; the pool automatically resizes when you allocate new slots.
    pub fn resize(&mut self, size: usize) -> io::Result<()> {
        let old_len = self.inner.len();
        self.inner.resize(size)?;
        let new_len = self.inner.len();
        if old_len == new_len {
            return Ok(());
        }
        // add the new memory to the freelist
        let mut free = self.free_list.lock().unwrap();
        if let Some(FreelistEntry { offset, len }) = free.last_mut() {
            if *offset + *len == old_len {
                *len += new_len - old_len;
                return Ok(());
            }
        }
        free.push(FreelistEntry { offset: old_len, len: new_len - old_len });
        Ok(())
    }

    /// Allocate `size` bytes from the free list, growing the backing memory if no free entry is
    /// large enough, and return the offset of the allocation.
    fn alloc(&mut self, size: usize) -> io::Result<usize> {
        let mut free = self.free_list.lock().unwrap();
        for FreelistEntry { offset, len } in free.iter_mut() {
            if *len >= size {
                let rv = *offset;
                *len -= size;
                *offset += size;
                return Ok(rv);
            }
        }
        let mut rv = self.inner.len();
        let mut pop_tail = false;
        if let Some(FreelistEntry { offset, len }) = free.last() {
            if offset + len == self.inner.len() {
                rv -= len;
                pop_tail = true;
            }
        }
        // resize like Vec::reserve, always at least doubling
        let target = std::cmp::max(rv + size, self.inner.len() * 2);
        self.inner.resize(target)?;
        // adjust the end of the freelist here
        if pop_tail {
            free.pop();
        }
        if target > rv + size {
            free.push(FreelistEntry { offset: rv + size, len: target - rv - size });
        }
        Ok(rv)
    }

    /// Return a byte range to the free list, merging it with any adjacent free entries.
    fn free(free_list: &Mutex<Vec<FreelistEntry>>, mut offset: usize, mut len: usize) {
        let mut free = free_list.lock().unwrap();
        let mut nf = Vec::with_capacity(free.len() + 1);
        for &FreelistEntry { offset: ioff, len: ilen } in free.iter() {
            if ioff + ilen == offset {
                offset = ioff;
                len += ilen;
                continue;
            }
            if ioff == offset + len {
                len += ilen;
                continue;
            }
            if ioff > offset + len && len != 0 {
                nf.push(FreelistEntry { offset, len });
                len = 0;
            }
            if ilen != 0 {
                nf.push(FreelistEntry { offset: ioff, len: ilen });
            }
        }
        if len != 0 {
            nf.push(FreelistEntry { offset, len });
        }
        *free = nf;
    }

    /// Create a new slot with the given size in bytes.
    ///
    /// The length is rounded up to a multiple of 64 bytes.
    pub fn new_slot(&mut self, mut len: usize) -> io::Result<Slot> {
        // round the requested length up to a multiple of 64 bytes
        len = (len + 63) & !63;
        let offset = self.alloc(len)?;

        Ok(Slot {
            inner: Arc::new(SlotInner {
                free_list: Arc::downgrade(&self.free_list),
                offset,
                len,
                active_buffers: AtomicUsize::new(0),
                all_refs: AtomicUsize::new(1),
            }),
        })
    }

    /// Get the bytes corresponding to a given slot.
    ///
    /// Note: prefer using [Self::canvas], which will prevent drawing to a buffer that has not been
    /// released by the server.
    ///
    /// Returns an empty buffer if the slot does not belong to this pool.
    pub fn raw_data_mut(&mut self, slot: &Slot) -> &mut [u8] {
        if slot.inner.free_list.as_ptr() == Arc::as_ptr(&self.free_list) {
            &mut self.inner.mmap()[slot.inner.offset..][..slot.inner.len]
        } else {
            &mut []
        }
    }

    /// Create a new buffer corresponding to a slot.
    ///
    /// The parameters are:
    ///
    /// - `width`: the width of this buffer (in pixels)
    /// - `height`: the height of this buffer (in pixels)
    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
    /// - `format`: the encoding format of the pixels. Using a format that was not
    ///   advertised to the `wl_shm` global by the server is a protocol error and will
    ///   terminate your connection.
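    ///
    /// # Example
    ///
    /// A sketch of sharing one slot between two buffers, for example to present the same pixels
    /// with two formats; `pool` is assumed to be an existing [SlotPool] and error handling is
    /// elided:
    ///
    /// ```ignore
    /// let slot = pool.new_slot(64 * 64 * 4)?;
    /// let argb = pool.create_buffer_in(&slot, 64, 64, 64 * 4, wl_shm::Format::Argb8888)?;
    /// let xrgb = pool.create_buffer_in(&slot, 64, 64, 64 * 4, wl_shm::Format::Xrgb8888)?;
    /// // Both buffers point at the slot's backing memory.
    /// ```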
    pub fn create_buffer_in(
        &mut self,
        slot: &Slot,
        width: i32,
        height: i32,
        stride: i32,
        format: wl_shm::Format,
    ) -> Result<Buffer, CreateBufferError> {
        let offset = slot.inner.offset as i32;
        let len = (height as usize) * (stride as usize);
        if len > slot.inner.len {
            return Err(CreateBufferError::SlotTooSmall);
        }

        if slot.inner.free_list.as_ptr() != Arc::as_ptr(&self.free_list) {
            return Err(CreateBufferError::PoolMismatch);
        }

        let slot = slot.clone();
        // take a ref for the BufferData; it will be released by BufferData::record_death
        slot.inner.all_refs.fetch_add(1, Ordering::Relaxed);
        let data = Arc::new(BufferData {
            inner: slot.inner.clone(),
            state: AtomicU8::new(BufferData::INACTIVE),
        });
        let buffer = self.inner.create_buffer_raw(offset, width, height, stride, format, data);
        Ok(Buffer { buffer, height, stride, slot })
    }
}

impl Clone for Slot {
    fn clone(&self) -> Self {
        let inner = self.inner.clone();
        inner.all_refs.fetch_add(1, Ordering::Relaxed);
        Slot { inner }
    }
}

impl Drop for Slot {
    fn drop(&mut self) {
        if self.inner.all_refs.fetch_sub(1, Ordering::Relaxed) == 1 {
            if let Some(free_list) = self.inner.free_list.upgrade() {
                SlotPool::free(&free_list, self.inner.offset, self.inner.len);
            }
        }
    }
}

impl Drop for SlotInner {
    fn drop(&mut self) {
        debug_assert_eq!(*self.all_refs.get_mut(), 0);
    }
}

/// A helper trait for [SlotPool::canvas].
pub trait CanvasKey {
    /// Get the bytes corresponding to this key if drawing is permitted.
    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]>;
}

impl Slot {
    /// Return true if there are buffers referencing this slot whose contents are being accessed
    /// by the server.
    pub fn has_active_buffers(&self) -> bool {
        self.inner.active_buffers.load(Ordering::Relaxed) != 0
    }

    /// Returns the size, in bytes, of this slot.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.inner.len
    }

    /// Get the bytes corresponding to this slot if drawing to the slot is permitted.
    ///
    /// Returns `None` if there are active buffers in the slot or if the slot does not correspond
    /// to the given pool.
    pub fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
        if self.has_active_buffers() {
            return None;
        }
        if self.inner.free_list.as_ptr() == Arc::as_ptr(&pool.free_list) {
            Some(&mut pool.inner.mmap()[self.inner.offset..][..self.inner.len])
        } else {
            None
        }
    }
}

impl CanvasKey for Slot {
    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
        self.canvas(pool)
    }
}

impl Buffer {
    /// Attach a buffer to a surface.
    ///
    /// This marks the slot as active until the server releases the buffer, which will happen
    /// automatically assuming the surface is committed without attaching a different buffer.
    ///
    /// Note: if you need to ensure that [`canvas()`](Buffer::canvas) calls never return data that
    /// could be attached to a surface in a multi-threaded client, make this call while you have
    /// exclusive access to the corresponding [`SlotPool`].
    pub fn attach_to(&self, surface: &wl_surface::WlSurface) -> Result<(), ActivateSlotError> {
        self.activate()?;
        surface.attach(Some(&self.buffer), 0, 0);
        Ok(())
    }

    /// Get the inner buffer.
    pub fn wl_buffer(&self) -> &wl_buffer::WlBuffer {
        &self.buffer
    }

    /// Return the height this buffer was created with.
    pub fn height(&self) -> i32 {
        self.height
    }

    /// Return the stride this buffer was created with.
    pub fn stride(&self) -> i32 {
        self.stride
    }

    fn data(&self) -> Option<&BufferData> {
        self.buffer.object_data()?.downcast_ref()
    }

    /// Get the bytes corresponding to this buffer if drawing is permitted.
    ///
    /// This may be smaller than the canvas associated with the slot.
    pub fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
        let len = (self.height as usize) * (self.stride as usize);
        if self.slot.inner.active_buffers.load(Ordering::Relaxed) != 0 {
            return None;
        }
        if self.slot.inner.free_list.as_ptr() == Arc::as_ptr(&pool.free_list) {
            Some(&mut pool.inner.mmap()[self.slot.inner.offset..][..len])
        } else {
            None
        }
    }

    /// Get the slot corresponding to this buffer.
    pub fn slot(&self) -> Slot {
        self.slot.clone()
    }

    /// Manually mark a buffer as active.
    ///
    /// An active buffer prevents drawing on its slot until a Release event is received or until
    /// manually deactivated.
    pub fn activate(&self) -> Result<(), ActivateSlotError> {
        let data = self.data().expect("UserData type mismatch");

        // This bitwise AND will transition INACTIVE -> ACTIVE, or do nothing if the buffer was
        // already ACTIVE. No other ordering is required, as the server will not send a Release
        // until we send our attach after returning Ok.
        match data.state.fetch_and(!BufferData::RELEASE_SET, Ordering::Relaxed) {
            BufferData::INACTIVE => {
                data.inner.active_buffers.fetch_add(1, Ordering::Relaxed);
                Ok(())
            }
            BufferData::ACTIVE => Err(ActivateSlotError::AlreadyActive),
            _ => unreachable!("Invalid state in BufferData"),
        }
    }

    /// Manually mark a buffer as inactive.
    ///
    /// This should be used when the buffer was manually marked as active or when a buffer was
    /// attached to a surface but not committed. Calling this function on a buffer that was
    /// committed to a surface risks making the surface contents undefined.
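    ///
    /// A sketch of undoing an attach that will not be committed; `buffer` and `surface` are
    /// assumed to exist as in the [SlotPool::create_buffer] example, with error handling elided:
    ///
    /// ```ignore
    /// buffer.attach_to(&surface)?;
    /// // ... this frame is skipped, so undo the pending attach ...
    /// surface.attach(None, 0, 0);
    /// buffer.deactivate()?;
    /// ```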
    pub fn deactivate(&self) -> Result<(), ActivateSlotError> {
        let data = self.data().expect("UserData type mismatch");

        // Same operation as the Release event, but we know the Buffer was not dropped.
        match data.state.fetch_or(BufferData::RELEASE_SET, Ordering::Relaxed) {
            BufferData::ACTIVE => {
                data.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
                Ok(())
            }
            BufferData::INACTIVE => Err(ActivateSlotError::AlreadyActive),
            _ => unreachable!("Invalid state in BufferData"),
        }
    }
}

impl CanvasKey for Buffer {
    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> {
        self.canvas(pool)
    }
}

impl Drop for Buffer {
    fn drop(&mut self) {
        if let Some(data) = self.data() {
            match data.state.fetch_or(BufferData::DESTROY_SET, Ordering::Relaxed) {
                BufferData::ACTIVE => {
                    // server is using the buffer, let ObjectData handle the destroy
                }
                BufferData::INACTIVE => {
                    data.record_death();
                    self.buffer.destroy();
                }
                _ => unreachable!("Invalid state in BufferData"),
            }
        }
    }
}

impl wayland_client::backend::ObjectData for BufferData {
    fn event(
        self: Arc<Self>,
        handle: &wayland_client::backend::Backend,
        msg: wayland_backend::protocol::Message<wayland_backend::client::ObjectId, OwnedFd>,
    ) -> Option<Arc<dyn wayland_backend::client::ObjectData>> {
        debug_assert!(wayland_client::backend::protocol::same_interface(
            msg.sender_id.interface(),
            wl_buffer::WlBuffer::interface()
        ));
        debug_assert!(msg.opcode == 0);

        match self.state.fetch_or(BufferData::RELEASE_SET, Ordering::Relaxed) {
            BufferData::ACTIVE => {
                self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
            }
            BufferData::INACTIVE => {
                // possible spurious release, or someone called deactivate incorrectly
                log::debug!("Unexpected WlBuffer::Release on an inactive buffer");
            }
            BufferData::DESTROY_ON_RELEASE => {
                self.record_death();
                self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);

                // The Destroy request is identical to the Release event (no args, same object
                // ID, opcode 0), so just reply with the message we received.
                handle
                    .send_request(msg.map_fd(|x| x.as_raw_fd()), None, None)
                    .expect("Unexpected invalid ID");
            }
            BufferData::DEAD => {
                // no-op, this object is already unusable
            }
            _ => unreachable!("Invalid state in BufferData"),
        }

        None
    }

    fn destroyed(&self, _: wayland_backend::client::ObjectId) {}
}

impl Drop for BufferData {
    fn drop(&mut self) {
        let state = *self.state.get_mut();
        if state == BufferData::ACTIVE || state == BufferData::DESTROY_ON_RELEASE {
            // Release the active-buffer count
            self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
        }

        if state != BufferData::DEAD {
            // nobody has ever transitioned state to DEAD, so we are responsible for freeing the
            // extra reference
            self.record_death();
        }
    }
}