// imago/format/sync_wrappers.rs

//! Synchronous wrapper around [`FormatAccess`].

use super::drivers::FormatDriverInstance;
use super::PreallocateMode;
use crate::io_buffers::{IoVector, IoVectorMut};
use crate::{FormatAccess, Mapping, Storage};
use std::io;

/// Synchronous wrapper around [`FormatAccess`].
///
/// Creates and keeps a tokio runtime in which to run I/O.
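///
/// # Example
///
/// A minimal usage sketch; the storage and driver types (`File`, `Qcow2`) and the
/// slice-to-I/O-vector conversion are assumptions for illustration, so the example
/// is not compiled here:
///
/// ```ignore
/// let storage = File::open("image.qcow2")?;                 // hypothetical `Storage` impl
/// let image = SyncFormatAccess::new(Qcow2::new(storage)?)?; // hypothetical format driver
///
/// let mut buf = vec![0u8; 512];
/// image.read(buf.as_mut_slice(), 0)?; // read the first 512 bytes
/// ```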
pub struct SyncFormatAccess<S: Storage + 'static> {
    /// Wrapped asynchronous [`FormatAccess`].
    inner: FormatAccess<S>,

    /// Tokio runtime in which I/O is run.
    runtime: tokio::runtime::Runtime,
}

impl<S: Storage + 'static> SyncFormatAccess<S> {
    /// Like [`FormatAccess::new()`], but create a synchronous wrapper.
    pub fn new<D: FormatDriverInstance<Storage = S> + 'static>(inner: D) -> io::Result<Self> {
        FormatAccess::new(inner).try_into()
    }

    /// Get a reference to the contained async [`FormatAccess`] object.
    pub fn inner(&self) -> &FormatAccess<S> {
        &self.inner
    }

    /// Return the disk size in bytes.
    pub fn size(&self) -> u64 {
        self.inner.size()
    }

    /// Set the number of simultaneous async requests per read.
    ///
    /// When issuing read requests, issue this many async requests in parallel (still in a single
    /// thread).  The default count is `1`, i.e. no parallel requests.
    ///
    /// Note that this synchronous wrapper still runs async functions internally, so this setting
    /// is effective even for [`SyncFormatAccess`].
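    ///
    /// # Example
    ///
    /// A sketch (not compiled here) of tuning parallelization before issuing large requests:
    ///
    /// ```ignore
    /// image.set_async_read_parallelization(8);  // up to 8 concurrent reads per request
    /// image.set_async_write_parallelization(8); // up to 8 concurrent writes per request
    /// ```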
    pub fn set_async_read_parallelization(&mut self, count: usize) {
        self.inner.set_async_read_parallelization(count)
    }

    /// Set the number of simultaneous async requests per write.
    ///
    /// When issuing write requests, issue this many async requests in parallel (still in a single
    /// thread).  The default count is `1`, i.e. no parallel requests.
    ///
    /// Note that this synchronous wrapper still runs async functions internally, so this setting
    /// is effective even for [`SyncFormatAccess`].
    pub fn set_async_write_parallelization(&mut self, count: usize) {
        self.inner.set_async_write_parallelization(count)
    }

    /// Minimal I/O alignment, for both length and offset.
    ///
    /// All requests to this image should be aligned to this value, both in length and offset.
    ///
    /// Requests that do not match this alignment will be realigned internally, which requires
    /// creating bounce buffers and, for write requests, read-modify-write cycles.  This is costly
    /// and should be avoided.
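    ///
    /// # Example
    ///
    /// A sketch (not compiled here) of rounding a request range outward to this alignment,
    /// assuming the returned alignment is a power of two:
    ///
    /// ```ignore
    /// let align = image.req_align() as u64;
    /// let aligned_offset = offset & !(align - 1);                      // round down
    /// let aligned_end = (offset + length + align - 1) & !(align - 1);  // round up
    /// ```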
    pub fn req_align(&self) -> usize {
        self.inner.req_align()
    }

    /// Minimal memory buffer alignment, for both address and length.
    ///
    /// All buffers used in requests to this image should be aligned to this value, both their
    /// address and length.
    ///
    /// Request buffers that do not match this alignment will be realigned internally, which
    /// requires creating bounce buffers.  This is costly and should be avoided.
    pub fn mem_align(&self) -> usize {
        self.inner.mem_align()
    }

    /// Return the mapping at `offset`.
    ///
    /// Find what `offset` is mapped to, and return that mapping information together with the
    /// length of that continuous mapping (from `offset`).
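    ///
    /// # Example
    ///
    /// A sketch (not compiled here) of walking all mappings in an image:
    ///
    /// ```ignore
    /// let mut offset = 0;
    /// while offset < image.size() {
    ///     let (mapping, length) = image.get_mapping_sync(offset, image.size() - offset)?;
    ///     // Inspect `mapping` here, e.g. match on its variants.
    ///     offset += length;
    /// }
    /// ```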
    pub fn get_mapping_sync(
        &self,
        offset: u64,
        max_length: u64,
    ) -> io::Result<(Mapping<'_, S>, u64)> {
        self.runtime
            .block_on(self.inner.get_mapping(offset, max_length))
    }

    /// Create a raw data mapping at `offset`.
    ///
    /// Ensure that `offset` is directly mapped to some storage object, up to a length of `length`.
    /// Return the storage object, the corresponding offset there, and the continuous length that
    /// we were able to map (less than or equal to `length`).
    ///
    /// If `overwrite` is true, the contents in the range are supposed to be overwritten and may be
    /// discarded.  Otherwise, they are kept.
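    ///
    /// # Example
    ///
    /// A sketch (not compiled here); the caller must check whether the mapped length covers the
    /// full request:
    ///
    /// ```ignore
    /// let (storage, store_offset, mapped_len) = image.ensure_data_mapping(0, 65536, false)?;
    /// // Up to `mapped_len` bytes can now be accessed directly on `storage` at
    /// // `store_offset`, bypassing the format layer.
    /// ```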
    pub fn ensure_data_mapping(
        &self,
        offset: u64,
        length: u64,
        overwrite: bool,
    ) -> io::Result<(&S, u64, u64)> {
        self.runtime
            .block_on(self.inner.ensure_data_mapping(offset, length, overwrite))
    }

    /// Read data at `offset` into `bufv`.
    ///
    /// Reads until `bufv` is filled completely, i.e. will not do short reads.  When reaching the
    /// end of file, the rest of `bufv` is filled with 0.
    pub fn readv(&self, bufv: IoVectorMut<'_>, offset: u64) -> io::Result<()> {
        self.runtime.block_on(self.inner.readv(bufv, offset))
    }

    /// Read data at `offset` into `buf`.
    ///
    /// Reads until `buf` is filled completely, i.e. will not do short reads.  When reaching the
    /// end of file, the rest of `buf` is filled with 0.
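    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming mutable byte slices convert into [`IoVectorMut`]:
    ///
    /// ```ignore
    /// let mut buf = vec![0u8; 4096];
    /// image.read(buf.as_mut_slice(), 0)?; // fill `buf` from offset 0
    /// ```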
    pub fn read<'a>(&'a self, buf: impl Into<IoVectorMut<'a>>, offset: u64) -> io::Result<()> {
        self.readv(buf.into(), offset)
    }

    /// Write data from `bufv` to `offset`.
    ///
    /// Writes all data from `bufv` (or returns an error), i.e. will not do short writes.  Reaching
    /// the end of file before the end of the buffer results in an error.
    pub fn writev(&self, bufv: IoVector<'_>, offset: u64) -> io::Result<()> {
        self.runtime.block_on(self.inner.writev(bufv, offset))
    }

    /// Write data from `buf` to `offset`.
    ///
    /// Writes all data from `buf` (or returns an error), i.e. will not do short writes.  Reaching
    /// the end of file before the end of the buffer results in an error.
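    ///
    /// # Example
    ///
    /// A sketch (not compiled here), assuming byte slices convert into [`IoVector`]:
    ///
    /// ```ignore
    /// let data = vec![0xaau8; 512];
    /// image.write(data.as_slice(), 4096)?; // write 512 bytes at offset 4096
    /// ```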
    pub fn write<'a>(&'a self, buf: impl Into<IoVector<'a>>, offset: u64) -> io::Result<()> {
        self.writev(buf.into(), offset)
    }

    /// Ensure the given range reads as zeroes.
    ///
    /// May use efficient zeroing for a subset of the given range, if supported by the format.
    /// Will not discard anything, which keeps existing data mappings usable, although writing to
    /// mappings that are now zeroed may have no effect.
    ///
    /// Check whether [`SyncFormatAccess::discard_to_zero()`] better suits your needs: It may work
    /// on a wider range of formats (`write_zeroes()` requires support for preallocated zero
    /// clusters, which qcow2 has, but other formats may not), and can actually free up space.
    /// However, because it can break existing data mappings, it requires a mutable `self`
    /// reference.
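    ///
    /// # Example
    ///
    /// A sketch (not compiled here) of zeroing the first MiB while keeping existing data
    /// mappings intact:
    ///
    /// ```ignore
    /// image.write_zeroes(0, 1 << 20)?;
    /// // With a mutable reference, `discard_to_zero()` could free the space instead:
    /// // image.discard_to_zero(0, 1 << 20)?;
    /// ```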
    pub fn write_zeroes(&self, offset: u64, length: u64) -> io::Result<()> {
        self.runtime
            .block_on(self.inner.write_zeroes(offset, length))
    }

    /// Discard the given range, ensure it is read back as zeroes.
    ///
    /// Effectively the same as [`SyncFormatAccess::write_zeroes()`], but discard as much of the
    /// existing allocation as possible.  This breaks existing data mappings, so needs a mutable
    /// reference to `self`, which ensures that existing data references (which have the lifetime
    /// of an immutable `self` reference) cannot be kept.
    ///
    /// Areas that cannot be discarded (because of format-inherent alignment restrictions) are
    /// still overwritten with zeroes, unless discarding is not supported altogether.
    pub fn discard_to_zero(&mut self, offset: u64, length: u64) -> io::Result<()> {
        self.runtime
            .block_on(self.inner.discard_to_zero(offset, length))
    }

    /// Discard the given range, not guaranteeing specific data on read-back.
    ///
    /// Discard as much of the given range as possible, and keep the rest as-is.  Does not
    /// guarantee any specific data on read-back, in contrast to
    /// [`SyncFormatAccess::discard_to_zero()`].
    ///
    /// If this format does not support discarding, an error
    /// ([`std::io::ErrorKind::Unsupported`]) is still returned.
    pub fn discard_to_any(&mut self, offset: u64, length: u64) -> io::Result<()> {
        self.runtime
            .block_on(self.inner.discard_to_any(offset, length))
    }

    /// Discard the given range, such that the backing image becomes visible.
    ///
    /// Discard as much of the given range as possible so that a backing image’s data becomes
    /// visible, and keep the rest as-is.  This breaks existing data mappings, so needs a mutable
    /// reference to `self`, which ensures that existing data references (which have the lifetime
    /// of an immutable `self` reference) cannot be kept.
    pub fn discard_to_backing(&mut self, offset: u64, length: u64) -> io::Result<()> {
        self.runtime
            .block_on(self.inner.discard_to_backing(offset, length))
    }

    /// Flush internal buffers.
    ///
    /// Does not necessarily sync those buffers to disk.  When using `flush()`, consider whether
    /// you want to call `sync()` afterwards.
    ///
    /// Note that this will not drop the buffers, so they may still be used to serve later
    /// accesses.  Use [`SyncFormatAccess::invalidate_cache()`] to drop all buffers.
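    ///
    /// # Example
    ///
    /// A sketch (not compiled here) of making all written data durable:
    ///
    /// ```ignore
    /// image.flush()?; // write out internal buffers
    /// image.sync()?;  // then sync the written data to the hardware
    /// ```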
    pub fn flush(&self) -> io::Result<()> {
        self.runtime.block_on(self.inner.flush())
    }

    /// Sync data already written to the storage hardware.
    ///
    /// This does not necessarily flush internal buffers first, i.e. it does not imply `flush()`.
    /// When using `sync()`, consider whether you want to call `flush()` before it.
    pub fn sync(&self) -> io::Result<()> {
        self.runtime.block_on(self.inner.sync())
    }

    /// Drop internal buffers.
    ///
    /// This drops all internal buffers, but does not flush them!  All cached data is reloaded from
    /// disk on subsequent accesses.
    ///
    /// # Safety
    /// Not flushing internal buffers may cause image corruption.  You must ensure the on-disk
    /// state is consistent.
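    ///
    /// # Example
    ///
    /// A sketch (not compiled here); flushing first keeps the on-disk state consistent,
    /// satisfying the safety requirement:
    ///
    /// ```ignore
    /// image.flush()?;
    /// // SAFETY: all buffers were just flushed, so the on-disk state is consistent.
    /// unsafe { image.invalidate_cache() }?;
    /// ```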
    pub unsafe fn invalidate_cache(&self) -> io::Result<()> {
        // Safety ensured by caller
        self.runtime
            .block_on(unsafe { self.inner.invalidate_cache() })
    }

    /// Resize to the given size.
    ///
    /// Set the disk size to `new_size`.  If `new_size` is smaller than the current size, ignore
    /// `prealloc_mode` and discard the data after `new_size`.
    ///
    /// If `new_size` is larger than the current size, `prealloc_mode` determines whether and how
    /// the new range should be allocated; depending on the image format, it is possible that some
    /// preallocation modes are not supported, in which case an
    /// [`std::io::ErrorKind::Unsupported`] error is returned.
    ///
    /// This may break existing data mappings, so needs a mutable reference to `self`, which
    /// ensures that existing data references (which have the lifetime of an immutable `self`
    /// reference) cannot be kept.
    ///
    /// See also [`SyncFormatAccess::resize_grow()`] and [`SyncFormatAccess::resize_shrink()`],
    /// whose more specialized interfaces may be useful when you know whether you want to grow or
    /// shrink the image.
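    ///
    /// # Example
    ///
    /// A sketch (not compiled here); the `PreallocateMode` variant name is an assumption for
    /// illustration:
    ///
    /// ```ignore
    /// let new_size = 2 * image.size();
    /// image.resize(new_size, PreallocateMode::None)?; // grow without preallocation
    /// ```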
    pub fn resize(&mut self, new_size: u64, prealloc_mode: PreallocateMode) -> io::Result<()> {
        self.runtime
            .block_on(self.inner.resize(new_size, prealloc_mode))
    }

    /// Resize to the given size, which must be greater than the current size.
    ///
    /// Set the disk size to `new_size`, preallocating the new space according to `prealloc_mode`.
    /// Depending on the image format, it is possible that some preallocation modes are not
    /// supported, in which case an [`std::io::ErrorKind::Unsupported`] error is returned.
    pub fn resize_grow(&self, new_size: u64, prealloc_mode: PreallocateMode) -> io::Result<()> {
        self.runtime
            .block_on(self.inner.resize_grow(new_size, prealloc_mode))
    }

    /// Truncate to the given size, which must be smaller than the current size.
    ///
    /// Set the disk size to `new_size`, discarding the data after `new_size`.
    ///
    /// May break existing data mappings, hence the mutable `self` reference.
    pub fn resize_shrink(&mut self, new_size: u64) -> io::Result<()> {
        self.runtime.block_on(self.inner.resize_shrink(new_size))
    }
}

impl<S: Storage> TryFrom<FormatAccess<S>> for SyncFormatAccess<S> {
    type Error = io::Error;

    fn try_from(async_access: FormatAccess<S>) -> io::Result<Self> {
        let runtime = tokio::runtime::Builder::new_current_thread()
            .build()
            .map_err(|err| {
                io::Error::other(format!(
                    "Failed to create a tokio runtime for synchronous image access: {err}"
                ))
            })?;

        Ok(SyncFormatAccess {
            inner: async_access,
            runtime,
        })
    }
}

// #[cfg(not(feature = "async-drop"))]
impl<S: Storage> Drop for SyncFormatAccess<S> {
    fn drop(&mut self) {
        if let Err(err) = self.flush() {
            let inner = &self.inner;
            tracing::error!("Failed to flush {inner}: {err}");
        }
    }
}