imago/
io_buffers.rs

1//! Types for I/O buffers.
2//!
3//! This module provides:
4//! - buffer types that can be allocated with arbitrary alignment,
5//! - references to buffers that more or less ensure the content is read only once (because it can
6//!   change for buffers owned by VM guests),
7//! - buffer vector types.
8
9use crate::macros::passthrough_trait_fn;
10#[cfg(feature = "vm-memory")]
11use crate::misc_helpers::ImagoAsRef;
12use std::alloc::{self, GlobalAlloc};
13use std::fmt::{self, Debug, Formatter};
14use std::io::{IoSlice, IoSliceMut};
15use std::marker::PhantomData;
16#[cfg(unix)]
17use std::mem;
18use std::mem::{size_of, size_of_val};
19use std::ops::Range;
20use std::{cmp, io, ptr, slice};
21
/// Owned memory buffer.
///
/// Memory is allocated via [`std::alloc::System`] with a caller-chosen alignment, and freed when
/// this object is dropped.
pub struct IoBuffer {
    /// Raw pointer to the start of the buffer.
    pointer: *mut u8,

    /// Size in bytes.
    size: usize,

    /// Allocation layout, needed for freeing the allocation on drop.  `None` only for null
    /// buffers (length 0), which have no backing allocation.
    layout: Option<alloc::Layout>,
}
33
/// Reference to any immutable memory buffer.
///
/// Consuming accessors (e.g. `into_slice()`) take `self` by value so the contents are only read
/// once (see the module documentation on volatility for guest-owned buffers).
pub struct IoBufferRef<'a> {
    /// Raw pointer to the start of the buffer.
    pointer: *const u8,

    /// Size in bytes.
    size: usize,

    /// Lifetime marker, tying this reference to the borrowed buffer's lifetime `'a`.
    _lifetime: PhantomData<&'a [u8]>,
}
45
/// Reference to any mutable memory buffer.
///
/// Consuming accessors (e.g. `into_slice()`) take `self` by value so the contents are only
/// accessed once (see the module documentation on volatility for guest-owned buffers).
pub struct IoBufferMut<'a> {
    /// Raw pointer to the start of the buffer.
    pointer: *mut u8,

    /// Size in bytes.
    size: usize,

    /// Lifetime marker, tying this reference to the mutably borrowed buffer's lifetime `'a`.
    _lifetime: PhantomData<&'a mut [u8]>,
}
57
// Blocked because of the pointer, but we want this to be usable across threads

// SAFETY: These types behave like (mutable) byte slices; the auto traits are only blocked because
// raw pointers are conservatively `!Send`/`!Sync`.  The pointed-to memory itself has no thread
// affinity.  NOTE(review): `Sync for IoBufferMut` additionally relies on callers not aliasing the
// mutable buffer across threads — presumably upheld by the consuming-accessor design; confirm.
unsafe impl Send for IoBuffer {}
unsafe impl Sync for IoBuffer {}
unsafe impl Send for IoBufferRef<'_> {}
unsafe impl Sync for IoBufferRef<'_> {}
unsafe impl Send for IoBufferMut<'_> {}
unsafe impl Sync for IoBufferMut<'_> {}
65
66impl IoBuffer {
67    /// Create a new owned buffer, containing uninitialized data.
68    ///
69    /// Do note that the returned buffer contains uninitialized data, which however is perfectly
70    /// fine for an I/O buffer.
71    pub fn new(size: usize, alignment: usize) -> io::Result<Self> {
72        let layout = alloc::Layout::from_size_align(size, alignment).map_err(io::Error::other)?;
73        Self::new_with_layout(layout)
74    }
75
76    /// Create a new owned buffer, containing uninitialized data, with the given `layout`.
77    pub fn new_with_layout(layout: alloc::Layout) -> io::Result<Self> {
78        if layout.size() == 0 {
79            return Ok(IoBuffer {
80                pointer: ptr::null_mut(),
81                size: 0,
82                layout: None,
83            });
84        }
85
86        // We guarantee the size not to be 0 and do not care about the memory being uninitialized,
87        // so this is safe
88        let pointer = unsafe { alloc::System.alloc(layout) };
89
90        if pointer.is_null() {
91            return Err(io::Error::new(
92                io::ErrorKind::OutOfMemory,
93                format!(
94                    "Failed to allocate memory (size={}, alignment={})",
95                    layout.size(),
96                    layout.align(),
97                ),
98            ));
99        }
100
101        Ok(IoBuffer {
102            pointer,
103            size: layout.size(),
104            layout: Some(layout),
105        })
106    }
107
108    /// Length in bytes.
109    pub fn len(&self) -> usize {
110        self.size
111    }
112
113    /// Whether this is a null buffer (length is 0).
114    pub fn is_empty(&self) -> bool {
115        self.size == 0
116    }
117
118    /// Generate an immutable reference.
119    pub fn as_ref(&self) -> IoBufferRef<'_> {
120        IoBufferRef {
121            pointer: self.pointer as *const u8,
122            size: self.size,
123            _lifetime: PhantomData,
124        }
125    }
126
127    /// Generate an immutable reference to a sub-range.
128    pub fn as_ref_range(&self, range: Range<usize>) -> IoBufferRef<'_> {
129        IoBufferRef::from_slice(&self.as_ref().into_slice()[range])
130    }
131
132    /// Generate a mutable reference.
133    pub fn as_mut(&mut self) -> IoBufferMut<'_> {
134        IoBufferMut {
135            pointer: self.pointer,
136            size: self.size,
137            _lifetime: PhantomData,
138        }
139    }
140
141    /// Generate a mutable reference to a sub-range.
142    pub fn as_mut_range(&mut self, range: Range<usize>) -> IoBufferMut<'_> {
143        (&mut self.as_mut().into_slice()[range]).into()
144    }
145}
146
impl Drop for IoBuffer {
    /// Free this buffer.
    fn drop(&mut self) {
        // Null buffers (`layout == None`) have no backing allocation, so nothing to free
        if let Some(layout) = self.layout {
            // SAFETY: We have allocated this buffer via `alloc::System` with exactly this
            // layout, and drop runs at most once, so it cannot be freed twice
            unsafe {
                alloc::System.dealloc(self.pointer, layout);
            }
        }
    }
}
158
/// Common functions for both `IoBufferRef` and `IoBufferMut`.
///
/// Allows code to be generic over the mutability of the buffer reference; the associated types
/// select the matching (im)mutable slice and pointer types.
#[allow(dead_code)]
pub(crate) trait IoBufferRefTrait<'a>: Sized {
    /// `&[T]` or `&mut [T]`.
    type SliceType<T: Copy + Sized + 'a>;

    /// `*const T` or `*mut T`.
    type PointerType<T: Copy + Sized + 'a>;

    /// Create a reference to a slice.
    fn from_slice(slice: Self::SliceType<u8>) -> Self;

    /// Create an owned [`IoBuffer`] with the same data (copied).
    fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer>;

    /// Size in bytes.
    fn len(&self) -> usize;

    /// Whether the length is 0.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the pointer to the start of the buffer.
    fn as_ptr(&self) -> Self::PointerType<u8>;

    /// Turn this reference into a slice.
    ///
    /// References to `IoBuffer`s must not be copied/cloned (so they can only be accessed once;
    /// they are considered volatile due to potential VM guest accesses), so this consumes the
    /// object.
    fn into_slice(self) -> Self::SliceType<u8> {
        // Alignment requirement is always met (`u8` has an alignment of 1), resulting data is
        // pure binary data
        unsafe { self.into_typed_slice::<u8>() }
    }

    /// Turn this reference into a slice with the given element type.
    ///
    /// # Safety
    /// Caller must ensure that alignment and length requirements are met and that the resulting
    /// data is valid.
    unsafe fn into_typed_slice<T: Copy + Sized>(self) -> Self::SliceType<T>;

    /// Split the buffer at `mid`.
    ///
    /// Return `&self[..mid]` and `&self[mid..]`.
    ///
    /// If `mid > self.len()`, return `&self[..]` and `[]`.
    fn split_at(self, mid: usize) -> (Self, Self);

    /// Make this reference immutable.
    fn into_ref(self) -> IoBufferRef<'a>;
}
212
213impl<'a> IoBufferRef<'a> {
214    /// Create a reference to a slice.
215    pub fn from_slice(slice: &'a [u8]) -> Self {
216        IoBufferRef {
217            pointer: slice.as_ptr(),
218            size: size_of_val(slice),
219            _lifetime: PhantomData,
220        }
221    }
222
223    /// Create an owned [`IoBuffer`] with the same data (copied).
224    pub fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer> {
225        let mut new_buf = IoBuffer::new(self.len(), alignment)?;
226        new_buf
227            .as_mut()
228            .into_slice()
229            .copy_from_slice(self.into_slice());
230        Ok(new_buf)
231    }
232
233    /// Size in bytes.
234    pub fn len(&self) -> usize {
235        self.size
236    }
237
238    /// Whether the length is 0.
239    pub fn is_empty(&self) -> bool {
240        self.len() == 0
241    }
242
243    /// Return the pointer to the start of the buffer.
244    pub fn as_ptr(&self) -> *const u8 {
245        self.pointer
246    }
247
248    /// Turn this reference into a slice.
249    ///
250    /// References to `IoBuffer`s must not be copied/cloned (so they can only be accessed once;
251    /// they are considered volatile due to potential VM guest accesses), so this consumes the
252    /// object.
253    pub fn into_slice(self) -> &'a [u8] {
254        // Alignment requirement is always met, resulting data is pure binary data
255        unsafe { self.into_typed_slice::<u8>() }
256    }
257
258    /// Turn this reference into a slice with the given element type.
259    ///
260    /// # Safety
261    /// Caller must ensure that alignment and length requirements are met and that the resulting
262    /// data is valid.
263    pub unsafe fn into_typed_slice<T: Copy + Sized>(self) -> &'a [T] {
264        // Safety ensured by the caller; we ensure that nothing outside of this buffer will be part
265        // of the slice
266        unsafe { slice::from_raw_parts(self.as_ptr() as *const T, self.len() / size_of::<T>()) }
267    }
268
269    /// Split the buffer at `mid`.
270    ///
271    /// Return `&self[..mid]` and `&self[mid..]`.
272    ///
273    /// If `mid > self.len()`, return `&self[..]` and `[]`.
274    pub fn split_at(self, mid: usize) -> (IoBufferRef<'a>, IoBufferRef<'a>) {
275        let head_len = cmp::min(mid, self.size);
276
277        (
278            IoBufferRef {
279                pointer: self.pointer,
280                size: head_len,
281                _lifetime: PhantomData,
282            },
283            IoBufferRef {
284                // Safe because we have limited this to `self.size`
285                pointer: unsafe { self.pointer.add(head_len) },
286                size: self.size - head_len,
287                _lifetime: PhantomData,
288            },
289        )
290    }
291
292    /// Make this reference immutable.
293    pub fn into_ref(self) -> IoBufferRef<'a> {
294        self
295    }
296}
297
impl<'a> IoBufferRefTrait<'a> for IoBufferRef<'a> {
    type SliceType<T: Copy + Sized + 'a> = &'a [T];
    type PointerType<T: Copy + Sized + 'a> = *const T;

    // All trait methods simply forward to the identically named inherent methods
    passthrough_trait_fn! { fn from_slice(slice: Self::SliceType<u8>) -> Self; }
    passthrough_trait_fn! { fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer>; }
    passthrough_trait_fn! { fn len(&self) -> usize; }
    passthrough_trait_fn! { fn as_ptr(&self) -> Self::PointerType<u8>; }
    passthrough_trait_fn! { fn split_at(self, mid: usize) -> (Self, Self); }
    passthrough_trait_fn! { fn into_ref(self) -> IoBufferRef<'a>; }

    unsafe fn into_typed_slice<T: Copy + Sized>(self) -> Self::SliceType<T> {
        // SAFETY: Ensured by caller; `Self::into_typed_slice` resolves to the inherent method
        unsafe { Self::into_typed_slice(self) }
    }
}
314
315impl<'a> From<IoSlice<'a>> for IoBufferRef<'a> {
316    fn from(slice: IoSlice<'a>) -> Self {
317        IoBufferRef {
318            pointer: slice.as_ptr(),
319            size: slice.len(),
320            _lifetime: PhantomData,
321        }
322    }
323}
324
325impl<'a> From<IoBufferRef<'a>> for IoSlice<'a> {
326    fn from(buf: IoBufferRef<'a>) -> Self {
327        IoSlice::new(buf.into_slice())
328    }
329}
330
331impl<'a> IoBufferMut<'a> {
332    /// Create a reference to a slice.
333    pub fn from_slice(slice: &'a mut [u8]) -> Self {
334        IoBufferMut {
335            pointer: slice.as_mut_ptr(),
336            size: size_of_val(slice),
337            _lifetime: PhantomData,
338        }
339    }
340
341    /// Create an owned [`IoBuffer`] with the same data (copied).
342    pub fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer> {
343        let mut new_buf = IoBuffer::new(self.len(), alignment)?;
344        new_buf
345            .as_mut()
346            .into_slice()
347            .copy_from_slice(self.into_slice());
348        Ok(new_buf)
349    }
350
351    /// Size in bytes.
352    pub fn len(&self) -> usize {
353        self.size
354    }
355
356    /// Whether the length is 0.
357    pub fn is_empty(&self) -> bool {
358        self.len() == 0
359    }
360
361    /// Return the pointer to the start of the buffer.
362    pub fn as_ptr(&self) -> *mut u8 {
363        self.pointer
364    }
365
366    /// Turn this reference into a slice.
367    ///
368    /// References to `IoBuffer`s must not be copied/cloned (so they can only be accessed once;
369    /// they are considered volatile due to potential VM guest accesses), so this consumes the
370    /// object.
371    pub fn into_slice(self) -> &'a mut [u8] {
372        // Alignment requirement is always met, resulting data is pure binary data
373        unsafe { self.into_typed_slice::<u8>() }
374    }
375
376    /// Turn this reference into a slice with the given element type.
377    ///
378    /// # Safety
379    /// Caller must ensure that alignment and length requirements are met and that the resulting
380    /// data is valid.
381    pub unsafe fn into_typed_slice<T: Copy + Sized>(self) -> &'a mut [T] {
382        // Safety ensured by the caller; we ensure that nothing outside of this buffer will be part
383        // of the slice
384        unsafe { slice::from_raw_parts_mut(self.as_ptr() as *mut T, self.len() / size_of::<T>()) }
385    }
386
387    /// Split the buffer at `mid`.
388    ///
389    /// Return `&self[..mid]` and `&self[mid..]`.
390    ///
391    /// If `mid > self.len()`, return `&self[..]` and `[]`.
392    pub fn split_at(self, mid: usize) -> (IoBufferMut<'a>, IoBufferMut<'a>) {
393        let head_len = cmp::min(mid, self.size);
394
395        (
396            IoBufferMut {
397                pointer: self.pointer,
398                size: head_len,
399                _lifetime: PhantomData,
400            },
401            IoBufferMut {
402                // Safe because we have limited this to `self.size`
403                pointer: unsafe { self.pointer.add(head_len) },
404                size: self.size - head_len,
405                _lifetime: PhantomData,
406            },
407        )
408    }
409
410    /// Make this reference immutable.
411    pub fn into_ref(self) -> IoBufferRef<'a> {
412        IoBufferRef {
413            pointer: self.pointer,
414            size: self.size,
415            _lifetime: PhantomData,
416        }
417    }
418}
419
impl<'a> IoBufferRefTrait<'a> for IoBufferMut<'a> {
    type SliceType<T: Copy + Sized + 'a> = &'a mut [T];
    type PointerType<T: Copy + Sized + 'a> = *mut T;

    // All trait methods simply forward to the identically named inherent methods
    passthrough_trait_fn! { fn from_slice(slice: Self::SliceType<u8>) -> Self; }
    passthrough_trait_fn! { fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer>; }
    passthrough_trait_fn! { fn len(&self) -> usize; }
    passthrough_trait_fn! { fn as_ptr(&self) -> Self::PointerType<u8>; }
    passthrough_trait_fn! { fn split_at(self, mid: usize) -> (Self, Self); }
    passthrough_trait_fn! { fn into_ref(self) -> IoBufferRef<'a>; }

    unsafe fn into_typed_slice<T: Copy + Sized>(self) -> Self::SliceType<T> {
        // SAFETY: Ensured by caller; `Self::into_typed_slice` resolves to the inherent method
        unsafe { Self::into_typed_slice(self) }
    }
}
436
437impl<'a, T: Sized> From<&'a mut [T]> for IoBufferMut<'a> {
438    fn from(slice: &'a mut [T]) -> Self {
439        IoBufferMut {
440            pointer: slice.as_mut_ptr() as *mut u8,
441            size: size_of_val(slice),
442            _lifetime: PhantomData,
443        }
444    }
445}
446
447impl<'a> From<IoSliceMut<'a>> for IoBufferMut<'a> {
448    fn from(mut slice: IoSliceMut<'a>) -> Self {
449        IoBufferMut {
450            pointer: slice.as_mut_ptr(),
451            size: slice.len(),
452            _lifetime: PhantomData,
453        }
454    }
455}
456
457impl<'a> From<IoBufferMut<'a>> for IoSliceMut<'a> {
458    fn from(buf: IoBufferMut<'a>) -> Self {
459        IoSliceMut::new(buf.into_slice())
460    }
461}
462
/// Common functions for both `IoVector` and `IoVectorMut`.
///
/// Allows code to be generic over the mutability of a buffer vector.
#[allow(dead_code)]
pub(crate) trait IoVectorTrait: Sized {
    /// `&[u8]` or `&mut [u8]`.
    type SliceType;

    /// `IoSlice` or `IoSliceMut`.
    type BufferType;

    /// Create an empty vector.
    fn new() -> Self;

    /// Create an empty vector, pre-allocating space for `cap` buffers.
    ///
    /// This does not allocate a memory buffer, only space in the buffer vector.
    fn with_capacity(cap: usize) -> Self;

    /// Append a slice.
    fn push(&mut self, slice: Self::SliceType);

    /// Append a slice.
    fn push_ioslice(&mut self, ioslice: Self::BufferType);

    /// Insert a slice at the given `index` in the buffer vector.
    fn insert(&mut self, index: usize, slice: Self::SliceType);

    /// Return the sum total length in bytes of all buffers in this vector.
    fn len(&self) -> u64;

    /// Return the number of buffers in this vector.
    fn buffer_count(&self) -> usize;

    /// Return `true` if and only if this vector’s length is zero.
    ///
    /// Synonymous with whether this vector’s buffer count is zero.
    fn is_empty(&self) -> bool {
        // Buffers are expected to be non-empty (`push()`/`insert()` implementations assert
        // this), so zero total length and zero buffer count should coincide
        debug_assert!((self.len() == 0) == (self.buffer_count() == 0));
        self.len() == 0
    }

    /// Append all buffers from the given other vector to this vector.
    fn append(&mut self, other: Self);

    /// Split the vector into two.
    ///
    /// The first returned vector contains the bytes in the `[..mid]` range, and the second one
    /// covers the `[mid..]` range.
    fn split_at(self, mid: u64) -> (Self, Self);

    /// Like [`IoVectorTrait::split_at()`], but discards the head, only returning the tail.
    ///
    /// More efficient than to use `self.split_at(mid).1` because the former requires creating a
    /// new `Vec` object for the head, which this version skips.
    fn split_tail_at(self, mid: u64) -> Self;

    /// Copy the data from `self` into `slice`.
    ///
    /// Both must have the same length.
    fn copy_into_slice(&self, slice: &mut [u8]);

    /// Create a single owned [`IoBuffer`] with the same data (copied).
    fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer>;

    /// Return a corresponding `&[libc::iovec]`.
    ///
    /// # Safety
    /// `iovec` has no lifetime information.  Callers must ensure no elements in the returned slice
    /// are used beyond the lifetime `'_`.
    #[cfg(unix)]
    unsafe fn as_iovec<'a>(&'a self) -> &'a [libc::iovec]
    where
        Self: 'a;

    /// Check whether `self` is aligned.
    ///
    /// Each buffer must be aligned to `mem_alignment`, and each buffer’s length must be aligned to
    /// both `mem_alignment` and `req_alignment` (the I/O request offset/size alignment).
    fn is_aligned(&self, mem_alignment: usize, req_alignment: usize) -> bool;

    /// Return the internal vector of `IoSlice` objects.
    fn into_inner(self) -> Vec<Self::BufferType>;
}
545
546/// Implement most of both `IoVector` and `IoVectorMut`.
547macro_rules! impl_io_vector {
548    ($type:tt, $inner_type:tt, $buffer_type:tt, $slice_type:ty, $slice_type_lifetime_b:ty) => {
549        /// Vector of memory buffers.
550        pub struct $type<'a> {
551            /// Buffer list.
552            vector: Vec<$inner_type<'a>>,
553
554            /// Complete size in bytes.
555            total_size: u64,
556        }
557
558        impl<'a> $type<'a> {
559            /// Create an empty vector.
560            pub fn new() -> Self {
561                Self::default()
562            }
563
564            /// Create an empty vector, pre-allocating space for `cap` buffers.
565            ///
566            /// This does not allocate an memory buffer, only space in the buffer vector.
567            pub fn with_capacity(cap: usize) -> Self {
568                $type {
569                    vector: Vec::with_capacity(cap),
570                    total_size: 0,
571                }
572            }
573
574            /// Append a slice.
575            pub fn push(&mut self, slice: $slice_type) {
576                debug_assert!(!slice.is_empty());
577                self.total_size += slice.len() as u64;
578                self.vector.push($inner_type::new(slice));
579            }
580
581            /// Append a slice.
582            pub fn push_ioslice(&mut self, ioslice: $inner_type<'a>) {
583                debug_assert!(!ioslice.is_empty());
584                self.total_size += ioslice.len() as u64;
585                self.vector.push(ioslice);
586            }
587
588            /// Insert a slice at the given `index` in the buffer vector.
589            pub fn insert(&mut self, index: usize, slice: $slice_type) {
590                debug_assert!(!slice.is_empty());
591                self.total_size += slice.len() as u64;
592                self.vector.insert(index, $inner_type::new(slice));
593            }
594
595            /// Return the sum total length in bytes of all buffers in this vector.
596            pub fn len(&self) -> u64 {
597                self.total_size
598            }
599
600            /// Return the number of buffers in this vector.
601            pub fn buffer_count(&self) -> usize {
602                self.vector.len()
603            }
604
605            /// Return `true` if and only if this vector’s length is zero.
606            ///
607            /// Synonymous with whether this vector’s buffer count is zero.
608            pub fn is_empty(&self) -> bool {
609                debug_assert!((self.len() == 0) == (self.buffer_count() == 0));
610                self.len() == 0
611            }
612
613            /// Append all buffers from the given other vector to this vector.
614            pub fn append(&mut self, mut other: Self) {
615                self.total_size += other.total_size;
616                self.vector.append(&mut other.vector);
617            }
618
619            /// Split the vector into two.
620            ///
621            /// The first returned vector contains the bytes in the `[..mid]` range, and the second
622            /// one covers the `[mid..]` range.
623            pub fn split_at(self, mid: u64) -> (Self, Self) {
624                let (head, tail) = self.do_split_at(mid, true);
625                (head.unwrap(), tail)
626            }
627
628            /// Like [`Self::split_at()`], but discards the head, only returning the tail.
629            ///
630            /// More efficient than to use `self.split_at(mid).1` because the former requires
631            /// creating a new `Vec` object for the head, which this version skips.
632            pub fn split_tail_at(self, mid: u64) -> Self {
633                self.do_split_at(mid, false).1
634            }
635
636            /// Copy the data from `self` into `slice`.
637            ///
638            /// Both must have the same length.
639            pub fn copy_into_slice(&self, slice: &mut [u8]) {
640                if slice.len() as u64 != self.total_size {
641                    panic!("IoVectorTrait::copy_into_slice() called on a slice of different length from the vector");
642                }
643
644                assert!(self.total_size <= usize::MAX as u64);
645
646                let mut offset = 0usize;
647                for elem in self.vector.iter() {
648                    let next_offset = offset + elem.len();
649                    slice[offset..next_offset].copy_from_slice(&elem[..]);
650                    offset = next_offset;
651                }
652            }
653
654            /// Create a single owned [`IoBuffer`] with the same data (copied).
655            pub fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer> {
656                let size = self.total_size.try_into().map_err(|_| {
657                    io::Error::other(format!("Buffer is too big ({})", self.total_size))
658                })?;
659                let mut new_buf = IoBuffer::new(size, alignment)?;
660                self.copy_into_slice(new_buf.as_mut().into_slice());
661                Ok(new_buf)
662            }
663
664            /// Return a corresponding `&[libc::iovec]`.
665            ///
666            /// # Safety
667            /// `iovec` has no lifetime information.  Callers must ensure no elements in the
668            /// returned slice are used beyond the lifetime `'_`.
669            #[cfg(unix)]
670            pub unsafe fn as_iovec<'b>(&'b self) -> &'b [libc::iovec] where Self: 'b {
671                // IoSlice and IoSliceMut are defined to have the same representation in memory as
672                // libc::iovec does
673                unsafe {
674                    mem::transmute::<&'b [$inner_type<'b>], &'b [libc::iovec]>(&self.vector[..])
675                }
676            }
677
678            /// Check whether `self` is aligned.
679            ///
680            /// Each buffer must be aligned to `mem_alignment`, and each buffer’s length must be
681            /// aligned to both `mem_alignment` and `req_alignment` (the I/O request offset/size
682            /// alignment).
683            pub fn is_aligned(&self, mem_alignment: usize, req_alignment: usize) -> bool {
684                // Trivial case
685                if mem_alignment == 1 && req_alignment == 1 {
686                    return true;
687                }
688
689                debug_assert!(mem_alignment.is_power_of_two() && req_alignment.is_power_of_two());
690                let base_align_mask = mem_alignment - 1;
691                let len_align_mask = base_align_mask | (req_alignment - 1);
692
693                self.vector.iter().all(|buf| {
694                    buf.as_ptr() as usize & base_align_mask == 0 &&
695                        buf.len() & len_align_mask == 0
696                })
697            }
698
699            /// Return the internal vector of `IoSlice` objects.
700            pub fn into_inner(self) -> Vec<$inner_type<'a>> {
701                self.vector
702            }
703
704            /// Same as [`Self::push()`], but takes ownership of `self`.
705            ///
706            /// By taking ownership of `self` and returning it, this method allows reducing the
707            /// lifetime of `self` to that of `slice`, if necessary.
708            pub fn with_pushed<'b>(self, slice: $slice_type_lifetime_b) -> $type<'b>
709            where
710                'a: 'b,
711            {
712                let mut vec: $type<'b> = self;
713                vec.push(slice);
714                vec
715            }
716
717            /// Same as [`Self::insert()`], but takes ownership of `self.`
718            ///
719            /// By taking ownership of `self` and returning it, this method allows reducing the
720            /// lifetime of `self` to that of `slice`, if necessary.
721            pub fn with_inserted<'b>(self, index: usize, slice: $slice_type_lifetime_b) -> $type<'b>
722            where
723                'a: 'b,
724            {
725                let mut vec: $type<'b> = self;
726                vec.insert(index, slice);
727                vec
728            }
729
730            /// Implementation for [`Self::split_at()`] and [`Self::split_tail_at()`].
731            ///
732            /// If `keep_head` is true, both head and tail are returned ([`Self::split_at()`]).
733            /// Otherwise, the head is discarded ([`Self::split_tail_at()`]).
734            fn do_split_at(mut self, mid: u64, keep_head: bool) -> (Option<$type<'a>>, $type<'a>) {
735                if mid >= self.total_size {
736                    // Special case: Empty tail
737                    return (
738                        keep_head.then_some(self),
739                        $type {
740                            vector: Vec::new(),
741                            total_size: 0,
742                        },
743                    );
744                }
745
746                let mut i = 0; // Current element index
747                let mut offset = 0u64; // Current element offset
748                let (vec_head, vec_tail) = loop {
749                    if offset == mid {
750                        // Clean split: `i` is fully behind `mid`, the rest is fully ahead
751                        if keep_head {
752                            let mut vec_head = self.vector;
753                            let vec_tail = vec_head.split_off(i);
754                            break (Some(vec_head), vec_tail);
755                        } else {
756                            break (None, self.vector.split_off(i));
757                        }
758                    }
759
760                    let post_elm_offset = offset + self.vector[i].len() as u64;
761
762                    if post_elm_offset > mid {
763                        // Not so clean split: The beginning of this element was before `mid`, the end is
764                        // behind it, so we must split this element between head and tail
765                        let mut vec_head = self.vector;
766                        let mut tail_iter = vec_head.drain(i..);
767
768                        // This is the current element (at `i`), which must be present
769                        let mid_elm = tail_iter.next().unwrap();
770                        let mid_elm: $buffer_type<'a> = mid_elm.into();
771
772                        // Each element's length is of type usize, so this must fit into usize
773                        let mid_elm_head_len: usize = (mid - offset).try_into().unwrap();
774                        let (mid_head, mid_tail) = mid_elm.split_at(mid_elm_head_len);
775
776                        let mut vec_tail: Vec<$inner_type<'a>> = vec![mid_tail.into()];
777                        vec_tail.extend(tail_iter);
778
779                        if keep_head {
780                            vec_head.push(mid_head.into());
781                            break (Some(vec_head), vec_tail);
782                        } else {
783                            break (None, vec_tail);
784                        }
785                    }
786
787                    offset = post_elm_offset;
788
789                    i += 1;
790                    // We know that `mid < self.total_size`, so we must encounter `mid before the end of
791                    // the vector
792                    assert!(i < self.vector.len());
793                };
794
795                let head = keep_head.then(|| $type {
796                    vector: vec_head.unwrap(),
797                    total_size: mid,
798                });
799                let tail = $type {
800                    vector: vec_tail,
801                    total_size: self.total_size - mid,
802                };
803
804                (head, tail)
805            }
806        }
807
        impl<'a> IoVectorTrait for $type<'a> {
            type SliceType = $slice_type;
            type BufferType = $inner_type<'a>;

            // Forward every common trait method to the identically named inherent method
            // (presumably generated earlier in this macro) via `passthrough_trait_fn!`.
            passthrough_trait_fn! { fn new() -> Self; }
            passthrough_trait_fn! { fn with_capacity(cap: usize) -> Self; }
            passthrough_trait_fn! { fn push(&mut self, slice: Self::SliceType); }
            passthrough_trait_fn! { fn push_ioslice(&mut self, ioslice: Self::BufferType); }
            passthrough_trait_fn! { fn insert(&mut self, index: usize, slice: Self::SliceType); }
            passthrough_trait_fn! { fn len(&self) -> u64; }
            passthrough_trait_fn! { fn buffer_count(&self) -> usize; }
            passthrough_trait_fn! { fn append(&mut self, other: Self); }
            passthrough_trait_fn! { fn split_at(self, mid: u64) -> (Self, Self); }
            passthrough_trait_fn! { fn split_tail_at(self, mid: u64) -> Self; }
            passthrough_trait_fn! { fn copy_into_slice(&self, slice: &mut [u8]); }
            passthrough_trait_fn! { fn try_into_owned(self, alignment: usize) -> io::Result<IoBuffer>; }
            passthrough_trait_fn! { fn is_aligned(&self, mem_alignment: usize, req_alignment: usize) -> bool; }
            passthrough_trait_fn! { fn into_inner(self) -> Vec<Self::BufferType>; }

            #[cfg(unix)]
            unsafe fn as_iovec<'b>(&'b self) -> &'b [libc::iovec]
            where
                Self: 'b
            {
                // NOTE: `Self::as_iovec` resolves to the *inherent* method of the same name
                // (inherent methods take precedence over trait methods in path resolution),
                // so this is delegation, not recursion.
                // Safety ensured by caller
                unsafe { Self::as_iovec(self) }
            }
        }
836
837        impl<'a> From<Vec<$inner_type<'a>>> for $type<'a> {
838            fn from(vector: Vec<$inner_type<'a>>) -> Self {
839                let total_size = vector
840                    .iter()
841                    .map(|e| e.len())
842                    .fold(0u64, |sum, e| sum + e as u64);
843
844                $type { vector, total_size }
845            }
846        }
847
848        impl<'a> From<$buffer_type<'a>> for $type<'a> {
849            fn from(buffer: $buffer_type<'a>) -> Self {
850                let total_size = buffer.len() as u64;
851                if total_size > 0 {
852                    $type {
853                        vector: vec![buffer.into()],
854                        total_size,
855                    }
856                } else {
857                    $type {
858                        vector: Vec::new(),
859                        total_size: 0,
860                    }
861                }
862            }
863        }
864
865        impl<'a> From<$slice_type> for $type<'a> {
866            fn from(slice: $slice_type) -> Self {
867                let total_size = slice.len() as u64;
868                if total_size > 0 {
869                    $type {
870                        vector: vec![$inner_type::new(slice)],
871                        total_size,
872                    }
873                } else {
874                    $type {
875                        vector: Vec::new(),
876                        total_size: 0,
877                    }
878                }
879            }
880        }
881
882        impl<'a> Default for $type<'a> {
883            fn default() -> Self {
884                $type {
885                    vector: Vec::new(),
886                    total_size: 0,
887                }
888            }
889        }
890
        impl Debug for $type<'_> {
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                // Deliberately omit the buffer contents (and even the buffer list itself):
                // only print summary information, i.e. element count and total byte size.
                f.debug_struct(std::stringify!($type))
                    .field("vector.len()", &self.vector.len())
                    .field("total_size", &self.total_size)
                    .finish()
            }
        }
899    };
900}
901
// Generate the immutable vector type: `IoVector` holds `IoSlice` elements
// referencing `&'a [u8]` data (buffer reference type `IoBufferRef`).
impl_io_vector!(IoVector, IoSlice, IoBufferRef, &'a [u8], &'b [u8]);
// Generate the mutable vector type: `IoVectorMut` holds `IoSliceMut` elements
// referencing `&'a mut [u8]` data (buffer reference type `IoBufferMut`).
impl_io_vector!(
    IoVectorMut,
    IoSliceMut,
    IoBufferMut,
    &'a mut [u8],
    &'b mut [u8]
);
910
#[cfg(feature = "vm-memory")]
impl<'a> IoVector<'a> {
    /// Converts a `VolatileSlice` array (from vm-memory) into an `IoVector`.
    ///
    /// In addition to the vector, return a guard that ensures that the memory in `slices` is
    /// indeed mapped while in use.  This guard must not be dropped while this vector is in use!
    pub fn from_volatile_slice<
        B: vm_memory::bitmap::BitmapSlice,
        I: IntoIterator<
            Item: ImagoAsRef<'a, vm_memory::VolatileSlice<'a, B>>,
            IntoIter: ExactSizeIterator,
        >,
    >(
        slices: I,
    ) -> (
        Self,
        VolatileSliceGuard<'a, vm_memory::volatile_memory::PtrGuard, B>,
    ) {
        // Obtain a pointer guard for every slice first; these guards are what keeps the
        // memory mapped, so they must be collected before any raw pointer is dereferenced
        // and kept alive (inside the returned `VolatileSliceGuard`) afterwards.
        let ptr_guards = slices
            .into_iter()
            .map(|slice| slice.as_ref().ptr_guard())
            .collect::<Vec<_>>();
        let buffers = ptr_guards
            .iter()
            .map(|pg| {
                // Safe because this whole module basically exists to follow the same design concepts
                // as `VolatileSlice`.
                let slice = unsafe { std::slice::from_raw_parts(pg.as_ptr(), pg.len()) };
                IoSlice::new(slice)
            })
            .collect::<Vec<_>>();

        let vector = IoVector::from(buffers);
        let guard = VolatileSliceGuard {
            _ptr_guards: ptr_guards,
            // `IoVector` is immutable, so no need to dirty
            dirty_on_drop: None,
        };

        (vector, guard)
    }
}
953
954impl IoVectorMut<'_> {
955    /// Fill all buffers in the vector with the given byte pattern.
956    pub fn fill(&mut self, value: u8) {
957        for slice in self.vector.iter_mut() {
958            slice.fill(value);
959        }
960    }
961
962    /// Copy data from `slice` into the buffers in this vector.
963    ///
964    /// The vector and the slice must have the same total length.
965    pub fn copy_from_slice(&mut self, slice: &[u8]) {
966        if slice.len() as u64 != self.total_size {
967            panic!("IoVectorMut::copy_from_slice() called on a slice of different length from the vector");
968        }
969
970        assert!(self.total_size <= usize::MAX as u64);
971
972        let mut offset = 0usize;
973        for elem in self.vector.iter_mut() {
974            let next_offset = offset + elem.len();
975            elem.copy_from_slice(&slice[offset..next_offset]);
976            offset = next_offset;
977        }
978    }
979}
980
#[cfg(feature = "vm-memory")]
impl<'a> IoVectorMut<'a> {
    /// Converts a `VolatileSlice` array (from vm-memory) into an `IoVectorMut`.
    ///
    /// In addition to the vector, return a guard that ensures that the memory in `slices` is
    /// indeed mapped while in use.  This guard must not be dropped while this vector is in use!
    pub fn from_volatile_slice<
        B: vm_memory::bitmap::BitmapSlice,
        I: IntoIterator<
            Item: ImagoAsRef<'a, vm_memory::VolatileSlice<'a, B>>,
            IntoIter: ExactSizeIterator,
        >,
    >(
        slices: I,
    ) -> (
        Self,
        VolatileSliceGuard<'a, vm_memory::volatile_memory::PtrGuardMut, B>,
    ) {
        let slices = slices.into_iter();
        let slice_count = slices.len();
        let mut ptr_guards = Vec::with_capacity(slice_count);
        let mut dirty_on_drop = Vec::with_capacity(slice_count);

        // Collect pointer guards (which keep the memory mapped) and, per slice, the bitmap
        // range that must be dirtied once the guard is dropped.
        for slice in slices {
            let slice = slice.as_ref();
            ptr_guards.push(slice.ptr_guard_mut());
            // `IoVectorMut` is mutable, so we can assume it will all be written
            dirty_on_drop.push((slice.bitmap(), slice.len()));
        }

        let buffers = ptr_guards
            .iter()
            .map(|pg| {
                // Safe because this whole module basically exists to follow the same design concepts
                // as `VolatileSlice`.
                let slice = unsafe { std::slice::from_raw_parts_mut(pg.as_ptr(), pg.len()) };
                IoSliceMut::new(slice)
            })
            .collect::<Vec<_>>();

        let vector = IoVectorMut::from(buffers);
        let guard = VolatileSliceGuard {
            _ptr_guards: ptr_guards,
            dirty_on_drop: Some(dirty_on_drop),
        };

        (vector, guard)
    }
}
1030
1031impl<'a> From<&'a Vec<u8>> for IoVector<'a> {
1032    fn from(vec: &'a Vec<u8>) -> Self {
1033        vec.as_slice().into()
1034    }
1035}
1036
1037impl<'a> From<&'a IoBuffer> for IoVector<'a> {
1038    fn from(buf: &'a IoBuffer) -> Self {
1039        buf.as_ref().into_slice().into()
1040    }
1041}
1042
1043impl<'a> From<&'a mut Vec<u8>> for IoVectorMut<'a> {
1044    fn from(vec: &'a mut Vec<u8>) -> Self {
1045        vec.as_mut_slice().into()
1046    }
1047}
1048
1049impl<'a> From<&'a mut IoBuffer> for IoVectorMut<'a> {
1050    fn from(buf: &'a mut IoBuffer) -> Self {
1051        buf.as_mut().into_slice().into()
1052    }
1053}
1054
/// Ensures an I/O vector’s validity when created from [`VolatileSlice`] arrays.
///
/// [`VolatileSlice`] arrays may require being explicitly mapped before use (and unmapped after),
/// and this guard ensures that the memory is mapped until it is dropped.
///
/// Further, for mutable vectors ([`IoVectorMut`]), it will also dirty the corresponding bitmap
/// slices when dropped, assuming the whole vector has been written.
///
/// [`VolatileSlice`]: vm_memory::VolatileSlice
#[cfg(feature = "vm-memory")]
pub struct VolatileSliceGuard<'a, PtrGuardType, BitmapType: vm_memory::bitmap::Bitmap> {
    /// vm-memory’s pointer guards ensuring the memory remains mapped while used.
    _ptr_guards: Vec<PtrGuardType>,

    /// If given, mark the given dirty bitmap range as dirty when dropping this guard.
    ///
    /// `.1` is the length of the respective `VolatileSlice` (i.e. the length of the area to
    /// dirty).
    dirty_on_drop: Option<Vec<(&'a BitmapType, usize)>>,
}
1073
#[cfg(feature = "vm-memory")]
impl<P, B: vm_memory::bitmap::Bitmap> Drop for VolatileSliceGuard<'_, P, B> {
    /// If requested at construction, mark all covered bitmap ranges as dirty.
    fn drop(&mut self) {
        let Some(ranges) = self.dirty_on_drop.take() else {
            return;
        };
        for (bitmap, len) in ranges {
            // Every bitmap is a window into the full bitmap for its specific `VolatileSlice`,
            // so marking the whole window as dirty is correct.
            bitmap.mark_dirty(0, len);
        }
    }
}
1086
#[cfg(all(test, feature = "vm-memory"))]
mod vm_memory_test {
    use crate::io_buffers::{IoVector, IoVectorMut};
    use vm_memory::bitmap::BitmapSlice;
    use vm_memory::VolatileSlice;

    /// Check that `from_volatile_slice` accepts owned `VolatileSlice` elements.
    ///
    /// Primarily a compile-time check of the generic `IntoIterator` bounds; called with an
    /// empty array at runtime.
    pub fn do_test_volatile_slice_owned<B: BitmapSlice>(slices: &[VolatileSlice<B>]) {
        {
            let _vec = IoVector::from_volatile_slice(slices);
        }
        {
            let _vec = IoVectorMut::from_volatile_slice(slices);
        }
    }

    #[test]
    fn test_volatile_slice_owned() {
        let empty: Vec<VolatileSlice<()>> = Vec::new();
        do_test_volatile_slice_owned(&empty);
    }

    /// Check that `from_volatile_slice` also accepts `&VolatileSlice` elements.
    ///
    /// Primarily a compile-time check of the generic `IntoIterator` bounds; called with an
    /// empty array at runtime.
    pub fn do_test_volatile_slice_ref<B: BitmapSlice>(slices: &[&VolatileSlice<B>]) {
        {
            let _vec = IoVector::from_volatile_slice(slices);
        }
        {
            let _vec = IoVectorMut::from_volatile_slice(slices);
        }
    }

    #[test]
    fn test_volatile_slice_ref() {
        let empty: Vec<&vm_memory::VolatileSlice<()>> = Vec::new();
        do_test_volatile_slice_ref(&empty);
    }
}