use super::*;
use crate::io_buffers::IoBuffer;

impl<S: Storage, F: WrappedFormat<S>> Qcow2<S, F> {
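    /// Perform copy-on-write for the given guest `cluster`, so it can safely be written to.
    ///
    /// If the cluster is already allocated and copied, return it unchanged; but if
    /// `mandatory_host_cluster` is given and the existing allocation differs from it, return
    /// `Ok(None)` instead.
    ///
    /// Otherwise, allocate a new data cluster (at `mandatory_host_cluster`, if given; return
    /// `Ok(None)` if that specific cluster cannot be allocated) and fill it with the
    /// cluster's current data: copied from the data file for uncopied data clusters, from
    /// the backing image for backed clusters, zeroed for zero clusters, and decompressed for
    /// compressed clusters.
    ///
    /// `partial_skip_cow` may give a range (relative to the cluster's start) that the caller
    /// is going to overwrite immediately anyway, and which therefore need not be copied.
    ///
    /// Enter the new mapping into `l2_table`; any allocation that is replaced in the process
    /// and thus no longer referenced is appended to `leaked_allocations` so the caller can
    /// free it.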
    pub(super) async fn cow_cluster(
        &self,
        cluster: GuestCluster,
        mandatory_host_cluster: Option<HostCluster>,
        partial_skip_cow: Option<Range<usize>>,
        l2_table: &mut L2TableWriteGuard<'_>,
        leaked_allocations: &mut Vec<(HostCluster, ClusterCount)>,
    ) -> io::Result<Option<HostCluster>> {
        // If the skipped range covers the whole cluster, no data needs to be copied at all
        let full_skip_cow = if let Some(skip) = partial_skip_cow.as_ref() {
            skip.start == 0 && skip.end == self.header.cluster_size()
        } else {
            false
        };

        let existing_mapping = l2_table.get_mapping(cluster)?;
        if let L2Mapping::DataFile {
            host_cluster,
            copied: true,
        } = existing_mapping
        {
            // Already allocated and writable; nothing to do, unless a different host cluster
            // is required
            if let Some(mandatory_host_cluster) = mandatory_host_cluster {
                if host_cluster != mandatory_host_cluster {
                    return Ok(None);
                }
            }
            return Ok(Some(host_cluster));
        }

        self.need_writable()?;

        // Zero clusters that already have a copied host cluster can reuse that cluster
        let new_cluster = if let L2Mapping::Zero {
            host_cluster: Some(host_cluster),
            copied: true,
        } = existing_mapping
        {
            if let Some(mandatory_host_cluster) = mandatory_host_cluster {
                if host_cluster == mandatory_host_cluster {
                    Some(host_cluster)
                } else {
                    self.allocate_data_cluster_at(cluster, Some(mandatory_host_cluster))
                        .await?
                }
            } else {
                Some(host_cluster)
            }
        } else {
            self.allocate_data_cluster_at(cluster, mandatory_host_cluster)
                .await?
        };
        let Some(new_cluster) = new_cluster else {
            return Ok(None);
        };

        if !full_skip_cow {
            match existing_mapping {
                L2Mapping::DataFile {
                    host_cluster: _,
                    copied: true,
                } => unreachable!(),

                L2Mapping::DataFile {
                    host_cluster,
                    copied: false,
                } => {
                    self.cow_copy_storage(
                        self.storage(),
                        host_cluster,
                        new_cluster,
                        partial_skip_cow,
                    )
                    .await?
                }

                L2Mapping::Backing { backing_offset } => {
                    if let Some(backing) = self.backing.as_ref() {
                        self.cow_copy_format(backing, backing_offset, new_cluster, partial_skip_cow)
                            .await?
                    } else {
                        // Without a backing image, the cluster reads as zero
                        self.cow_zero(new_cluster, partial_skip_cow).await?
                    }
                }

                L2Mapping::Zero {
                    host_cluster: _,
                    copied: _,
                } => self.cow_zero(new_cluster, partial_skip_cow).await?,

                L2Mapping::Compressed {
                    host_offset,
                    length,
                } => {
                    self.cow_compressed(host_offset, length, new_cluster)
                        .await?
                }
            }
        }

        let l2i = cluster.l2_index(self.header.cluster_bits());
        if let Some(leaked) = l2_table.map_cluster(l2i, new_cluster) {
            leaked_allocations.push(leaked);
        }

        Ok(Some(new_cluster))
    }

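    /// Calculate the range (relative to a cluster's start) that actually needs to be copied.
    ///
    /// By default, that is the whole cluster.  If `partial_skip_cow` is given and adjoins
    /// either end of the cluster, it is trimmed from the copied range; the result is then
    /// expanded to `alignment` (which must be a power of two).
    ///
    /// Return `None` if nothing needs to be copied.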
    fn get_cow_range(
        &self,
        partial_skip_cow: Option<Range<usize>>,
        alignment: usize,
    ) -> Option<Range<usize>> {
        let mut copy_range = 0..self.header.cluster_size();
        // The skipped range can only be trimmed off if it adjoins either end of the cluster
        if let Some(partial_skip_cow) = partial_skip_cow {
            if partial_skip_cow.start == copy_range.start {
                copy_range.start = partial_skip_cow.end;
            } else if partial_skip_cow.end == copy_range.end {
                copy_range.end = partial_skip_cow.start;
            }
        }

        if copy_range.is_empty() {
            return None;
        }

        // Expand the range to the required alignment
        let alignment = cmp::min(alignment, self.header.cluster_size());
        debug_assert!(alignment.is_power_of_two());
        let mask = alignment - 1;

        if copy_range.start & mask != 0 {
            copy_range.start &= !mask;
        }
        if copy_range.end & mask != 0 {
            copy_range.end = (copy_range.end & !mask) + alignment;
        }

        Some(copy_range)
    }

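    /// Copy cluster data from `from_cluster` in `from` to `to_cluster` in our data file,
    /// limited to the COW range derived from `partial_skip_cow`.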
    async fn cow_copy_storage(
        &self,
        from: &S,
        from_cluster: HostCluster,
        to_cluster: HostCluster,
        partial_skip_cow: Option<Range<usize>>,
    ) -> io::Result<()> {
        let to = self.storage();

        let align = cmp::max(from.req_align(), to.req_align());
        let Some(cow_range) = self.get_cow_range(partial_skip_cow, align) else {
            return Ok(());
        };

        // The buffer is read into from `from` and written out to `to`, so it must satisfy
        // the memory alignment requirements of both
        let mem_align = cmp::max(from.mem_align(), to.mem_align());
        let mut buf = IoBuffer::new(cow_range.end - cow_range.start, mem_align)?;

        let cb = self.header.cluster_bits();
        let from_offset = from_cluster.offset(cb);
        let to_offset = to_cluster.offset(cb);

        from.read(&mut buf, from_offset.0 + cow_range.start as u64)
            .await?;

        to.write(&buf, to_offset.0 + cow_range.start as u64).await?;

        Ok(())
    }

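    /// Copy cluster data from another image `from` (the backing image) at `from_offset` to
    /// `to_cluster` in our data file, limited to the COW range derived from
    /// `partial_skip_cow`.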
    async fn cow_copy_format(
        &self,
        from: &F,
        from_offset: u64,
        to_cluster: HostCluster,
        partial_skip_cow: Option<Range<usize>>,
    ) -> io::Result<()> {
        let to = self.storage();
        let from = from.inner();

        let align = cmp::max(from.req_align(), to.req_align());
        let Some(cow_range) = self.get_cow_range(partial_skip_cow, align) else {
            return Ok(());
        };

        // As in `cow_copy_storage()`, the buffer must be aligned for both `from` and `to`
        let mem_align = cmp::max(from.mem_align(), to.mem_align());
        let mut buf = IoBuffer::new(cow_range.end - cow_range.start, mem_align)?;

        let to_offset = to_cluster.offset(self.header.cluster_bits());

        from.read(&mut buf, from_offset + cow_range.start as u64)
            .await?;

        to.write(&buf, to_offset.0 + cow_range.start as u64).await?;

        Ok(())
    }

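    /// Zero `to_cluster` in our data file, limited to the COW range derived from
    /// `partial_skip_cow`.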
    async fn cow_zero(
        &self,
        to_cluster: HostCluster,
        partial_skip_cow: Option<Range<usize>>,
    ) -> io::Result<()> {
        let to = self.storage();

        let align = to.req_align();
        let Some(cow_range) = self.get_cow_range(partial_skip_cow, align) else {
            return Ok(());
        };

        let to_offset = to_cluster.offset(self.header.cluster_bits());
        to.write_zeroes(
            to_offset.0 + cow_range.start as u64,
            (cow_range.end - cow_range.start) as u64,
        )
        .await?;

        Ok(())
    }

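    /// Decompress the compressed cluster at `compressed_offset` (of compressed length
    /// `compressed_length`) into `to_cluster` in our data file.
    ///
    /// Compressed clusters can only be copied whole, so there is no partial skip range here.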
    async fn cow_compressed(
        &self,
        compressed_offset: HostOffset,
        compressed_length: u64,
        to_cluster: HostCluster,
    ) -> io::Result<()> {
        let to = self.storage();

        let mut buf = IoBuffer::new(self.header.cluster_size(), to.mem_align())?;
        self.read_compressed_cluster(
            buf.as_mut().into_slice(),
            compressed_offset,
            compressed_length,
        )
        .await?;

        let to_offset = to_cluster.offset(self.header.cluster_bits());
        to.write(&buf, to_offset.0).await?;

        Ok(())
    }
}