imago/qcow2/
io_func.rs

//! Special I/O functions.
//!
//! Most I/O should be implemented in the generic
//! [`imago::format::access`](crate::format::access) module, but some I/O needs to be done directly
//! by image drivers (like handling compression).

7use super::*;
8use crate::io_buffers::IoBuffer;
9
10impl<S: Storage, F: WrappedFormat<S>> Qcow2<S, F> {
11    /// Read the special range at `offset`.
12    ///
13    /// Currently, the only special range we have are compressed clusters.
14    pub(super) async fn do_readv_special(
15        &self,
16        mut bufv: IoVectorMut<'_>,
17        mut offset: GuestOffset,
18    ) -> io::Result<()> {
19        let mut saved_l2_table: Option<Arc<L2Table>> = None;
20        let cb = self.header.cluster_bits();
21
22        // Do everything cluster by cluster.
23        while !bufv.is_empty() {
24            let l2_table = if let Some(saved) = saved_l2_table.as_ref() {
25                saved
26            } else {
27                let new_l2 = self
28                    .get_l2(offset, false)
29                    .await?
30                    .ok_or(io::ErrorKind::Other)?;
31                saved_l2_table.get_or_insert(new_l2)
32            };
33
34            let chunk_length = offset.remaining_in_cluster(cb);
35            let (chunk, remainder) = bufv.split_at(chunk_length);
36            bufv = remainder;
37
38            let mut bounce_buffer_and_chunk = None;
39            let need_bounce_buffer = chunk.buffer_count() != 1
40                || offset.in_cluster_offset(cb) != 0
41                || chunk.len() != self.header.cluster_size() as u64;
42
43            let slice = if need_bounce_buffer {
44                let bounce_buffer = IoBuffer::new(self.header.cluster_size(), 1)?;
45                bounce_buffer_and_chunk = Some((bounce_buffer, chunk));
46                bounce_buffer_and_chunk.as_mut().unwrap().0.as_mut()
47            } else {
48                chunk.into_inner().pop().unwrap().into()
49            };
50
51            let guest_cluster = offset.cluster(cb);
52            match l2_table.get_mapping(guest_cluster)? {
53                L2Mapping::Compressed {
54                    host_offset,
55                    length,
56                } => {
57                    self.read_compressed_cluster(slice.into_slice(), host_offset, length)
58                        .await?;
59                }
60
61                _ => return Err(io::ErrorKind::Other.into()),
62            }
63
64            if let Some((bounce_buffer, mut chunk)) = bounce_buffer_and_chunk {
65                let ofs = offset.in_cluster_offset(cb);
66                let end = ofs + chunk.len() as usize;
67                chunk.copy_from_slice(bounce_buffer.as_ref_range(ofs..end).into_slice());
68            }
69
70            let next_cluster = if let Some(next) = guest_cluster.next_in_l2(cb) {
71                next
72            } else {
73                saved_l2_table.take();
74                guest_cluster.first_in_next_l2(cb)
75            };
76            offset = next_cluster.offset(cb);
77        }
78
79        Ok(())
80    }
81}