imago/format/access.rs

//! Actual public image access functionality.
//!
//! Provides access to different image formats via `FormatAccess` objects.

use super::drivers::{FormatDriverInstance, ShallowMapping};
use super::PreallocateMode;
use crate::io_buffers::{IoVector, IoVectorMut};
use crate::storage::ext::write_full_zeroes;
use crate::vector_select::FutureVector;
use crate::{Storage, StorageExt};
use std::fmt::{self, Display, Formatter};
use std::{cmp, io, ptr};

/// Provides access to a disk image.
#[derive(Debug)]
pub struct FormatAccess<S: Storage + 'static> {
    /// Image format driver.
    inner: Box<dyn FormatDriverInstance<Storage = S>>,

    /// Whether this image may be modified.
    writable: bool,

    /// How many asynchronous requests to perform per read request in parallel.
    read_parallelization: usize,

    /// How many asynchronous requests to perform per write request in parallel.
    write_parallelization: usize,
}

/// Fully recursive mapping information.
///
/// Mapping information that resolves down to the storage object layer (except for special data).
#[derive(Debug)]
#[non_exhaustive]
pub enum Mapping<'a, S: Storage + 'static> {
    /// Raw data.
    #[non_exhaustive]
    Raw {
        /// Storage object where this data is stored.
        storage: &'a S,

        /// Offset in `storage` where this data is stored.
        offset: u64,

        /// Whether this mapping may be written to.
        ///
        /// If `true`, you can directly write to `offset` on `storage` to change the disk image’s
        /// data accordingly.
        ///
        /// If `false`, the disk image format does not allow writing to `offset` on `storage`; a
        /// new mapping must be allocated first.
        writable: bool,
    },

    /// Range is to be read as zeroes.
    #[non_exhaustive]
    Zero {
        /// Whether these zeroes are explicit on this image (the top layer).
        ///
        /// Differential image formats (like qcow2) track information about the status for all
        /// blocks in the image (called clusters in case of qcow2).  Perhaps most importantly, they
        /// track whether a block is allocated or not:
        /// - Allocated blocks have their data in the image.
        /// - Unallocated blocks do not have their data in this image, but have to be read from a
        ///   backing image (which results in [`ShallowMapping::Indirect`] mappings).
        ///
        /// Thus, such images represent the difference from their backing image (hence
        /// “differential”).
        ///
        /// Without a backing image, this feature can be used for sparse allocation: Unallocated
        /// blocks are simply interpreted to be zero.  These ranges will be noted as
        /// [`Mapping::Zero`] with `explicit` set to false.
        ///
        /// Formats like qcow2 can track more information beyond just the allocation status,
        /// though, for example, whether a block should read as zero. Such blocks similarly do not
        /// need to have their data stored in the image file, but are still not treated as
        /// unallocated, so will never be read from a backing image, regardless of whether one
        /// exists or not.
        ///
        /// These ranges are noted as [`Mapping::Zero`] with `explicit` set to true.
        explicit: bool,
    },

    /// End of file reached.
    ///
    /// The accompanying length is always 0.
    #[non_exhaustive]
    Eof {},

    /// Data is encoded in some manner, e.g. compressed or encrypted.
    ///
    /// Such data cannot be accessed directly, but must be interpreted by the image format driver.
    #[non_exhaustive]
    Special {
        /// Format layer where this special data was encountered.
        layer: &'a FormatAccess<S>,

        /// Original (“guest”) offset on `layer` to pass to `readv_special()`.
        offset: u64,
    },
}

// When adding new public methods, don’t forget to add them to sync_wrappers, too.
impl<S: Storage + 'static> FormatAccess<S> {
    /// Wrap a format driver instance in `FormatAccess`.
    ///
    /// `FormatAccess` provides I/O access to disk images, based on the functionality offered by
    /// the individual format drivers via `FormatDriverInstance`.
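    ///
    /// # Example
    ///
    /// A minimal sketch of wrapping a driver; `MyDriver` stands in for any concrete
    /// [`FormatDriverInstance`] implementation and is not part of this crate:
    ///
    /// ```ignore
    /// // Hypothetical driver type implementing `FormatDriverInstance<Storage = S>`.
    /// let driver = MyDriver::open(storage).await?;
    /// let image = FormatAccess::new(driver);
    /// println!("disk size: {} bytes", image.size());
    /// ```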
    pub fn new<D: FormatDriverInstance<Storage = S> + 'static>(inner: D) -> Self {
        let writable = inner.writable();
        FormatAccess {
            inner: Box::new(inner),
            read_parallelization: 1,
            write_parallelization: 1,
            writable,
        }
    }

    /// Return the contained format driver instance.
    pub fn inner(&self) -> &dyn FormatDriverInstance<Storage = S> {
        self.inner.as_ref()
    }

    /// Return a mutable reference to the contained format driver instance.
    pub fn inner_mut(&mut self) -> &mut dyn FormatDriverInstance<Storage = S> {
        self.inner.as_mut()
    }

    /// Return the disk size in bytes.
    pub fn size(&self) -> u64 {
        self.inner.size()
    }

    /// Set the number of simultaneous async requests per read.
    ///
    /// When issuing read requests, issue this many async requests in parallel (still in a single
    /// thread).  The default count is `1`, i.e. no parallel requests.
    pub fn set_async_read_parallelization(&mut self, count: usize) {
        self.read_parallelization = count;
    }

    /// Set the number of simultaneous async requests per write.
    ///
    /// When issuing write requests, issue this many async requests in parallel (still in a single
    /// thread).  The default count is `1`, i.e. no parallel requests.
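    ///
    /// # Example
    ///
    /// A sketch of enabling parallel requests on an already opened image (`image` is assumed to
    /// be a mutable `FormatAccess`):
    ///
    /// ```ignore
    /// // Issue up to eight parallel async requests per read/write request.
    /// image.set_async_read_parallelization(8);
    /// image.set_async_write_parallelization(8);
    /// ```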
    pub fn set_async_write_parallelization(&mut self, count: usize) {
        self.write_parallelization = count;
    }

    /// Return all storage dependencies of this image.
    ///
    /// Includes recursive dependencies, i.e. those from other image dependencies like backing
    /// images.
    pub(crate) fn collect_storage_dependencies(&self) -> Vec<&S> {
        self.inner.collect_storage_dependencies()
    }

    /// Minimal I/O alignment, for both length and offset.
    ///
    /// All requests to this image should be aligned to this value, both in length and offset.
    ///
    /// Requests that do not match this alignment are realigned internally, which requires bounce
    /// buffers and, for write requests, read-modify-write cycles.  This is costly and should be
    /// avoided.
    pub fn req_align(&self) -> usize {
        self.inner
            .collect_storage_dependencies()
            .into_iter()
            .fold(1, |max, s| cmp::max(max, s.req_align()))
    }

    /// Minimal memory buffer alignment, for both address and length.
    ///
    /// All buffers used in requests to this image should be aligned to this value, both their
    /// address and length.
    ///
    /// Request buffers that do not match this alignment are realigned internally, which requires
    /// bounce buffers.  This is costly and should be avoided.
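    ///
    /// # Example
    ///
    /// A sketch of validating a request against both alignment requirements before issuing it
    /// (`image`, `offset`, and the byte buffer `buf` are assumed to exist in the caller’s
    /// context):
    ///
    /// ```ignore
    /// let req_align = image.req_align() as u64;
    /// let mem_align = image.mem_align();
    /// // Offset and length should be multiples of the request alignment …
    /// assert!(offset % req_align == 0 && (buf.len() as u64) % req_align == 0);
    /// // … and the buffer address and length should be multiples of the memory alignment.
    /// assert!(buf.as_ptr() as usize % mem_align == 0 && buf.len() % mem_align == 0);
    /// ```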
    pub fn mem_align(&self) -> usize {
        self.inner
            .collect_storage_dependencies()
            .into_iter()
            .fold(1, |max, s| cmp::max(max, s.mem_align()))
    }

    /// Read the data from the given mapping.
    async fn read_chunk(
        &self,
        mut bufv: IoVectorMut<'_>,
        mapping: Mapping<'_, S>,
    ) -> io::Result<()> {
        match mapping {
            Mapping::Raw {
                storage,
                offset,
                writable: _,
            } => storage.readv(bufv, offset).await,

            Mapping::Zero { explicit: _ } | Mapping::Eof {} => {
                bufv.fill(0);
                Ok(())
            }

            // FIXME: TOCTTOU problem.  Not sure how to fully fix it, if possible at all.
            // (Concurrent writes can change the mapping, but the driver will have to reload the
            // mapping because it cannot pass it in `ShallowMapping::Special`.  It may then
            // find that this is no longer a “special” range.  Even passing the low-level mapping
            // information in `Mapping::Special` wouldn’t fully fix it, though: If concurrent
            // writes change the low-level cluster type, and the driver then tries to e.g.
            // decompress the data that was there, that may well fail.)
            Mapping::Special { layer, offset } => layer.inner.readv_special(bufv, offset).await,
        }
    }

    /// Return the shallow mapping at `offset`.
    ///
    /// Find what `offset` is mapped to, which may be another format layer, return that
    /// information, and the length of the continuous mapping (from `offset`).
    ///
    /// Use [`FormatAccess::get_mapping()`] to recursively fully resolve references to other format
    /// layers.
    pub async fn get_shallow_mapping(
        &self,
        offset: u64,
        max_length: u64,
    ) -> io::Result<(ShallowMapping<'_, S>, u64)> {
        self.inner
            .get_mapping(offset, max_length)
            .await
            .map(|(m, l)| (m, cmp::min(l, max_length)))
    }

    /// Return the recursively resolved mapping at `offset`.
    ///
    /// Find what `offset` is mapped to, return that mapping information, and the length of that
    /// continuous mapping (from `offset`).
    ///
    /// All data references to other format layers are automatically resolved (recursively), so
    /// that the results are more “trivial” mappings (unless prevented by special mappings like
    /// compressed clusters).
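    ///
    /// # Example
    ///
    /// A sketch of inspecting the mapping of the first 64 kB (`image` is assumed to be a
    /// `FormatAccess`):
    ///
    /// ```ignore
    /// let (mapping, length) = image.get_mapping(0, 65536).await?;
    /// match mapping {
    ///     Mapping::Raw { storage, offset, .. } => {
    ///         println!("{length} bytes of raw data on {storage} at offset {offset}");
    ///     }
    ///     Mapping::Zero { .. } => println!("{length} bytes of zeroes"),
    ///     Mapping::Eof { .. } => println!("end of file"),
    ///     Mapping::Special { .. } => println!("{length} bytes of special (e.g. compressed) data"),
    ///     _ => {} // `Mapping` is non-exhaustive
    /// }
    /// ```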
    pub async fn get_mapping(
        &self,
        mut offset: u64,
        mut max_length: u64,
    ) -> io::Result<(Mapping<'_, S>, u64)> {
        let mut format_layer = self;
        let mut writable_gate = true;

        loop {
            let (mapping, length) = format_layer.get_shallow_mapping(offset, max_length).await?;

            match mapping {
                ShallowMapping::Raw {
                    storage,
                    offset,
                    writable,
                } => {
                    return Ok((
                        Mapping::Raw {
                            storage,
                            offset,
                            writable: writable && writable_gate,
                        },
                        length,
                    ))
                }

                ShallowMapping::Indirect {
                    layer: recurse_layer,
                    offset: recurse_offset,
                    writable: recurse_writable,
                } => {
                    format_layer = recurse_layer;
                    offset = recurse_offset;
                    writable_gate = recurse_writable;
                    max_length = length;
                }

                ShallowMapping::Zero { explicit } => {
                    // If this is not the top layer, always clear `explicit`
                    return if explicit && ptr::eq(format_layer, self) {
                        Ok((Mapping::Zero { explicit: true }, length))
                    } else {
                        Ok((Mapping::Zero { explicit: false }, length))
                    };
                }

                ShallowMapping::Eof {} => {
                    // Return EOF only on top layer, zero otherwise
                    return if ptr::eq(format_layer, self) {
                        Ok((Mapping::Eof {}, 0))
                    } else {
                        Ok((Mapping::Zero { explicit: false }, max_length))
                    };
                }

                ShallowMapping::Special { offset } => {
                    return Ok((
                        Mapping::Special {
                            layer: format_layer,
                            offset,
                        },
                        length,
                    ));
                }
            }
        }
    }

    /// Create a raw data mapping at `offset`.
    ///
    /// Ensure that `offset` is directly mapped to some storage object, up to a length of `length`.
    /// Return the storage object, the corresponding offset there, and the continuous length that
    /// we were able to map (less than or equal to `length`).
    ///
    /// If `overwrite` is true, the contents in the range are supposed to be overwritten and may be
    /// discarded.  Otherwise, they are kept.
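    ///
    /// # Example
    ///
    /// A sketch of mapping a range and writing to the underlying storage directly.  `image` is
    /// assumed to be a writable `FormatAccess`, `data` a byte slice, and `chunk.into()` assumes
    /// that an [`IoVector`] can be built from a byte slice (as [`FormatAccess::write()`]
    /// suggests):
    ///
    /// ```ignore
    /// let mut offset = 0;
    /// let mut remaining: &[u8] = data;
    /// while !remaining.is_empty() {
    ///     // May allocate new clusters; existing contents may be discarded (`overwrite` is true).
    ///     let (storage, st_offset, st_length) = image
    ///         .ensure_data_mapping(offset, remaining.len() as u64, true)
    ///         .await?;
    ///     let (chunk, rest) = remaining.split_at(st_length as usize);
    ///     // Write directly to the storage object backing the mapping.
    ///     storage.writev(chunk.into(), st_offset).await?;
    ///     remaining = rest;
    ///     offset += st_length;
    /// }
    /// ```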
    pub async fn ensure_data_mapping(
        &self,
        offset: u64,
        length: u64,
        overwrite: bool,
    ) -> io::Result<(&S, u64, u64)> {
        let (storage, mapped_offset, mapped_length) = self
            .inner
            .ensure_data_mapping(offset, length, overwrite)
            .await?;
        let mapped_length = cmp::min(length, mapped_length);
        assert!(mapped_length > 0);
        Ok((storage, mapped_offset, mapped_length))
    }

    /// Read data at `offset` into `bufv`.
    ///
    /// Reads until `bufv` is filled completely, i.e. will not do short reads.  When reaching the
    /// end of file, the rest of `bufv` is filled with 0.
    pub async fn readv(&self, mut bufv: IoVectorMut<'_>, mut offset: u64) -> io::Result<()> {
        let mut workers = (self.read_parallelization > 1).then(FutureVector::new);

        while !bufv.is_empty() {
            let (mapping, chunk_length) = self.get_mapping(offset, bufv.len()).await?;
            if chunk_length == 0 {
                assert!(mapping.is_eof());
                bufv.fill(0);
                break;
            }

            if let Some(workers) = workers.as_mut() {
                while workers.len() >= self.read_parallelization {
                    workers.select().await?;
                }
            }

            let (chunk, remainder) = bufv.split_at(chunk_length);
            bufv = remainder;
            offset += chunk_length;

            if let Some(workers) = workers.as_mut() {
                workers.push(Box::pin(self.read_chunk(chunk, mapping)));
            } else {
                self.read_chunk(chunk, mapping).await?;
            }
        }

        if let Some(mut workers) = workers {
            workers.discarding_join().await?;
        }

        Ok(())
    }

    /// Read data at `offset` into `buf`.
    ///
    /// Reads until `buf` is filled completely, i.e. will not do short reads.  When reaching the
    /// end of file, the rest of `buf` is filled with 0.
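    ///
    /// # Example
    ///
    /// A sketch of reading the first sector into a plain buffer (assuming an [`IoVectorMut`] can
    /// be built from a `&mut [u8]`, as the `Into` bound suggests; `image` is a `FormatAccess`):
    ///
    /// ```ignore
    /// let mut buf = vec![0u8; 512];
    /// image.read(buf.as_mut_slice(), 0).await?;
    /// ```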
    pub async fn read(&self, buf: impl Into<IoVectorMut<'_>>, offset: u64) -> io::Result<()> {
        self.readv(buf.into(), offset).await
    }

    /// Write data from `bufv` to `offset`.
    ///
    /// Writes all data from `bufv` (or returns an error), i.e. will not do short writes.  Any part
    /// of `bufv` that would extend beyond the end of the disk is silently ignored.
    pub async fn writev(&self, mut bufv: IoVector<'_>, mut offset: u64) -> io::Result<()> {
        if !self.writable {
            return Err(io::Error::other("Image is read-only"));
        }

        // Limit to disk size
        let disk_size = self.inner.size();
        if offset >= disk_size {
            return Ok(());
        }
        if bufv.len() > disk_size - offset {
            bufv = bufv.split_at(disk_size - offset).0;
        }

        let mut workers = (self.write_parallelization > 1).then(FutureVector::new);

        while !bufv.is_empty() {
            let (storage, st_offset, st_length) =
                self.ensure_data_mapping(offset, bufv.len(), true).await?;

            if let Some(workers) = workers.as_mut() {
                while workers.len() >= self.write_parallelization {
                    workers.select().await?;
                }
            }

            let (chunk, remainder) = bufv.split_at(st_length);
            bufv = remainder;
            offset += st_length;

            if let Some(workers) = workers.as_mut() {
                workers.push(Box::pin(storage.writev(chunk, st_offset)));
            } else {
                storage.writev(chunk, st_offset).await?;
            }
        }

        if let Some(mut workers) = workers {
            workers.discarding_join().await?;
        }

        Ok(())
    }

    /// Write data from `buf` to `offset`.
    ///
    /// Writes all data from `buf` (or returns an error), i.e. will not do short writes.  Any part
    /// of `buf` that would extend beyond the end of the disk is silently ignored.
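    ///
    /// # Example
    ///
    /// A sketch of writing a small buffer (assuming an [`IoVector`] can be built from a `&[u8]`,
    /// as the `Into` bound suggests; `image` is a writable `FormatAccess`):
    ///
    /// ```ignore
    /// let data = vec![0x55u8; 4096];
    /// image.write(data.as_slice(), 0).await?;
    /// // Flush format-internal buffers once done writing.
    /// image.flush().await?;
    /// ```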
    pub async fn write(&self, buf: impl Into<IoVector<'_>>, offset: u64) -> io::Result<()> {
        self.writev(buf.into(), offset).await
    }

    /// Check whether the given range is zero.
    ///
    /// Checks for zero mappings, not zero data (although this might be changed in the future).
    ///
    /// Errors are treated as non-zero areas.
    async fn is_range_zero(&self, mut offset: u64, mut length: u64) -> bool {
        while length > 0 {
            match self.get_mapping(offset, length).await {
                Ok((Mapping::Zero { explicit: _ }, mlen)) => {
                    offset += mlen;
                    length -= mlen;
                }
                _ => return false,
            };
        }

        true
    }

    /// Ensure the given range reads as zeroes, without write-zeroes support.
    ///
    /// Does not require support for efficient zeroing, instead writing zeroes wherever the range
    /// does not read as zero yet.  Anything that is not an explicit zero mapping in this layer
    /// (including unallocated areas) is allocated as data and overwritten with zeroes.
    ///
    /// However, efficient zeroing support is still used if present.
    ///
    /// The main use case is to handle unaligned zero requests.  Quite inefficient for large areas.
    async fn soft_ensure_zero(&self, mut offset: u64, mut length: u64) -> io::Result<()> {
        // “Fast” path: Try to efficiently zero as much as possible
        if let Some(gran) = self.inner.zero_granularity() {
            let end = offset.checked_add(length).ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("Write-zero wrap-around: {offset} + {length}"),
                )
            })?;
            let mut aligned_start = offset - offset % gran;
            // Could be handled, but don’t bother
            let mut aligned_end = end.checked_next_multiple_of(gran).ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Write-zero wrap-around at cluster granularity",
                )
            })?;

            aligned_end = cmp::min(aligned_end, self.size());

            // Whether the whole area could be efficiently zeroed
            let mut fully_zeroed = true;

            if offset > aligned_start
                && !self
                    .is_range_zero(aligned_start, offset - aligned_start)
                    .await
            {
                // Non-zero head, we cannot zero that cluster.  Still try to zero as much as
                // possible.
                fully_zeroed = false;
                aligned_start += gran;
            }
            if end < aligned_end && !self.is_range_zero(end, aligned_end - end).await {
                // Non-zero tail, we cannot zero that cluster.  Still try to zero as much as
                // possible.
                fully_zeroed = false;
                aligned_end -= gran;
            }

            while aligned_start < aligned_end {
                let res = self
                    .inner
                    .ensure_zero_mapping(aligned_start, aligned_end - aligned_start)
                    .await;
                if let Ok((zofs, zlen)) = res {
                    if zofs != aligned_start || zlen == 0 {
                        // Produced a gap, so will need to fall back, but still try to zero as
                        // much as possible
                        fully_zeroed = false;
                        if zlen == 0 {
                            // Cannot go on
                            break;
                        }
                    }
                    aligned_start = zofs + zlen;
                } else {
                    // Ignore errors, just fall back
                    fully_zeroed = false;
                    break;
                }
            }

            if fully_zeroed {
                // Everything zeroed, no need to check
                return Ok(());
            }
        }

        // Slow path: Everything that is not zero in this layer is allocated as data and zeroes are
        // written.  The more we zeroed in the fast path, the quicker this will be.
        while length > 0 {
            let (mapping, mlen) = self.inner.get_mapping(offset, length).await?;
            let mlen = cmp::min(mlen, length);

            let mapping = match mapping {
                ShallowMapping::Raw {
                    storage,
                    offset,
                    writable,
                } => writable.then_some((storage, offset)),
                // For already zero clusters, we don’t need to do anything
                ShallowMapping::Zero { explicit: true } => {
                    // Nothing to be done
                    offset += mlen;
                    length -= mlen;
                    continue;
                }
                // For unallocated clusters, we should establish zero data
                ShallowMapping::Zero { explicit: false }
                | ShallowMapping::Indirect {
                    layer: _,
                    offset: _,
                    writable: _,
                } => None,
                ShallowMapping::Eof {} => {
                    return Err(io::ErrorKind::UnexpectedEof.into());
                }
                ShallowMapping::Special { offset: _ } => None,
            };

            let (file, mofs, mlen) = if let Some((file, mofs)) = mapping {
                (file, mofs, mlen)
            } else {
                self.ensure_data_mapping(offset, mlen, true).await?
            };

            write_full_zeroes(file, mofs, mlen).await?;
            offset += mlen;
            length -= mlen;
        }

        Ok(())
    }

    /// Ensure the given range reads as zeroes.
    ///
    /// May use efficient zeroing for a subset of the given range, if supported by the format.
    /// Will not discard anything, which keeps existing data mappings usable, although writing to
    /// mappings that are now zeroed may have no effect.
    ///
    /// Check if [`FormatAccess::discard_to_zero()`] better suits your needs: It may work better on
    /// a wider range of formats (`write_zeroes()` requires support for preallocated zero clusters,
    /// which qcow2 does have, but other formats may not), and can actually free up space.
    /// However, because it can break existing data mappings, it requires a mutable `self`
    /// reference.
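    ///
    /// # Example
    ///
    /// A sketch of zeroing the first MB of an image (`image` is assumed to be a writable
    /// `FormatAccess`):
    ///
    /// ```ignore
    /// // Keep allocations, just make the range read as zeroes:
    /// image.write_zeroes(0, 1 << 20).await?;
    ///
    /// // Alternatively, drop the allocations as well (needs `&mut image`):
    /// image.discard_to_zero(0, 1 << 20).await?;
    /// ```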
    pub async fn write_zeroes(&self, mut offset: u64, length: u64) -> io::Result<()> {
        let max_offset = offset.checked_add(length).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidInput, "Write-zeroes range overflow")
        })?;

        while offset < max_offset {
            let (zofs, zlen) = self
                .inner
                .ensure_zero_mapping(offset, max_offset - offset)
                .await?;
            if zlen == 0 {
                break;
            }
            // Fill up head, i.e. the range [offset, zofs)
            self.soft_ensure_zero(offset, zofs - offset).await?;
            offset = zofs + zlen;
        }

        // Fill up tail, i.e. the remaining range [offset, max_offset)
        self.soft_ensure_zero(offset, max_offset - offset).await?;
        Ok(())
    }

    /// Discard the given range, ensure it is read back as zeroes.
    ///
    /// Effectively the same as [`FormatAccess::write_zeroes()`], but discard as much of the
    /// existing allocation as possible.  This breaks existing data mappings, so needs a mutable
    /// reference to `self`, which ensures that existing data references (which have the lifetime
    /// of an immutable `self` reference) cannot be kept.
    ///
    /// Areas that cannot be discarded (because of format-inherent alignment restrictions) are
    /// still overwritten with zeroes, unless discarding is not supported altogether.
    pub async fn discard_to_zero(&mut self, mut offset: u64, length: u64) -> io::Result<()> {
        let max_offset = offset.checked_add(length).ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Discard-to-zero range overflow",
            )
        })?;

        while offset < max_offset {
            let (zofs, zlen) = self
                .inner
                .discard_to_zero(offset, max_offset - offset)
                .await?;
            if zlen == 0 {
                break;
            }
            // Fill up head, i.e. the range [offset, zofs)
            self.soft_ensure_zero(offset, zofs - offset).await?;
            offset = zofs + zlen;
        }

        // Fill up tail, i.e. the remaining range [offset, max_offset)
        self.soft_ensure_zero(offset, max_offset - offset).await?;
        Ok(())
    }

    /// Discard the given range, not guaranteeing specific data on read-back.
    ///
    /// Discard as much of the given range as possible, and keep the rest as-is.  Does not
    /// guarantee any specific data on read-back, in contrast to
    /// [`FormatAccess::discard_to_zero()`].
    ///
    /// If this format does not support discarding at all, an error
    /// ([`std::io::ErrorKind::Unsupported`]) is still returned.
    pub async fn discard_to_any(&mut self, mut offset: u64, length: u64) -> io::Result<()> {
        let max_offset = offset.checked_add(length).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidInput, "Discard-to-any range overflow")
        })?;

        while offset < max_offset {
            let (dofs, dlen) = self
                .inner
                .discard_to_any(offset, max_offset - offset)
                .await?;
            if dlen == 0 {
                break;
            }
            offset = dofs + dlen;
        }

        Ok(())
    }

    /// Discard the given range, such that the backing image becomes visible.
    ///
    /// Discard as much of the given range as possible so that a backing image’s data becomes
    /// visible, and keep the rest as-is.  This breaks existing data mappings, so needs a mutable
    /// reference to `self`, which ensures that existing data references (which have the lifetime
    /// of an immutable `self` reference) cannot be kept.
    pub async fn discard_to_backing(&mut self, mut offset: u64, length: u64) -> io::Result<()> {
        let max_offset = offset.checked_add(length).ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "Discard-to-backing range overflow",
            )
        })?;

        while offset < max_offset {
            let (dofs, dlen) = self
                .inner
                .discard_to_backing(offset, max_offset - offset)
                .await?;
            if dlen == 0 {
                break;
            }
            offset = dofs + dlen;
        }

        Ok(())
    }

    /// Flush internal buffers.  Always call this before drop!
    ///
    /// Does not necessarily sync those buffers to disk.  When using `flush()`, consider whether
    /// you want to call `sync()` afterwards.
    ///
    /// Because of the current lack of stable `async_drop`, you must manually call this before
    /// dropping a `FormatAccess` instance!  (Not necessarily for read-only images, though.)
    ///
    /// Note that this will not drop the buffers, so they may still be used to serve later
    /// accesses.  Use [`FormatAccess::invalidate_cache()`] to drop all buffers.
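    ///
    /// # Example
    ///
    /// A sketch of the usual shutdown sequence before dropping an image (`image` is assumed to be
    /// a `FormatAccess`):
    ///
    /// ```ignore
    /// // Write out internal buffers to the storage layer …
    /// image.flush().await?;
    /// // … and sync the written data to the storage hardware.
    /// image.sync().await?;
    /// drop(image);
    /// ```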
    pub async fn flush(&self) -> io::Result<()> {
        self.inner.flush().await
    }

    /// Sync data already written to the storage hardware.
    ///
    /// This does not necessarily include flushing internal buffers, i.e. `flush`.  When using
    /// `sync()`, consider whether you want to call `flush()` before it.
    pub async fn sync(&self) -> io::Result<()> {
        self.inner.sync().await
    }

    /// Drop internal buffers.
    ///
    /// This drops all internal buffers, but does not flush them!  All cached data is reloaded from
    /// disk on subsequent accesses.
    ///
    /// # Safety
    /// Not flushing internal buffers may cause image corruption.  You must ensure the on-disk
    /// state is consistent.
    pub async unsafe fn invalidate_cache(&self) -> io::Result<()> {
        // Safety ensured by caller
        unsafe { self.inner.invalidate_cache() }.await
    }

    /// Resize to the given size.
    ///
    /// Set the disk size to `new_size`.  If `new_size` is smaller than the current size, ignore
    /// `prealloc_mode` and discard the data after `new_size`.
    ///
    /// If `new_size` is larger than the current size, `prealloc_mode` determines whether and how
    /// the new range should be allocated; depending on the image format, some preallocation modes
    /// may not be supported, in which case an [`std::io::ErrorKind::Unsupported`] error is
    /// returned.
    ///
    /// This may break existing data mappings, so needs a mutable reference to `self`, which
    /// ensures that existing data references (which have the lifetime of an immutable `self`
    /// reference) cannot be kept.
    ///
    /// See also [`FormatAccess::resize_grow()`] and [`FormatAccess::resize_shrink()`], whose more
    /// specialized interface may be useful when you know whether you want to grow or shrink the
    /// image.
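    ///
    /// # Example
    ///
    /// A sketch of growing an image; `mode` stands for whichever [`PreallocateMode`] variant suits
    /// your use case:
    ///
    /// ```ignore
    /// // Grow to 10 GiB; requires `&mut image`, since shrinking may break existing data mappings.
    /// image.resize(10 << 30, mode).await?;
    /// ```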
    pub async fn resize(
        &mut self,
        new_size: u64,
        prealloc_mode: PreallocateMode,
    ) -> io::Result<()> {
        match new_size.cmp(&self.size()) {
            std::cmp::Ordering::Less => self.resize_shrink(new_size).await,
            std::cmp::Ordering::Equal => Ok(()),
            std::cmp::Ordering::Greater => self.resize_grow(new_size, prealloc_mode).await,
        }
    }

    /// Resize to the given size, which must be greater than the current size.
    ///
    /// Set the disk size to `new_size`, preallocating the new space according to `prealloc_mode`.
    /// Depending on the image format, some preallocation modes may not be supported, in which
    /// case an [`std::io::ErrorKind::Unsupported`] error is returned.
    ///
    /// If the current size is already `new_size` or greater, do nothing.
    pub async fn resize_grow(
        &self,
        new_size: u64,
        prealloc_mode: PreallocateMode,
    ) -> io::Result<()> {
        self.inner.resize_grow(new_size, prealloc_mode).await
    }

    /// Truncate to the given size, which must be smaller than the current size.
    ///
    /// Set the disk size to `new_size`, discarding the data after `new_size`.
    ///
    /// May break existing data mappings, which is why this method takes a mutable `self`
    /// reference.
    ///
    /// If the current size is already `new_size` or smaller, do nothing.
    pub async fn resize_shrink(&mut self, new_size: u64) -> io::Result<()> {
        self.inner.resize_shrink(new_size).await
    }
}

impl<S: Storage> Mapping<'_, S> {
    /// Return `true` if and only if this mapping signifies the end of file.
    pub fn is_eof(&self) -> bool {
        matches!(self, Mapping::Eof {})
    }
}

impl<S: Storage> Display for FormatAccess<S> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

impl<S: Storage> Display for Mapping<'_, S> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Mapping::Raw {
                storage,
                offset,
                writable,
            } => {
                let writable = if *writable { "rw" } else { "ro" };
                write!(f, "{storage}:0x{offset:x}/{writable}")
            }

            Mapping::Zero { explicit } => {
                let explicit = if *explicit { "explicit" } else { "unallocated" };
                write!(f, "<zero:{explicit}>")
            }

            Mapping::Eof {} => write!(f, "<eof>"),

            Mapping::Special { layer, offset } => {
                write!(f, "<special:{layer}:0x{offset:x}>")
            }
        }
    }
}

/*
#[cfg(feature = "async-drop")]
impl<S: Storage> std::future::AsyncDrop for FormatAccess<S> {
    type Dropper<'a> = std::pin::Pin<Box<dyn std::future::Future<Output = ()> + 'a>> where S: 'a;

    fn async_drop(self: std::pin::Pin<&mut Self>) -> Self::Dropper<'_> {
        Box::pin(async move {
            if let Err(err) = self.flush().await {
                let inner = &self.inner;
                tracing::error!("Failed to flush {inner}: {err}");
            }
        })
    }
}
*/