1 use super::{BorrowedBuf, BufReader, BufWriter, Read, Result, Write, DEFAULT_BUF_SIZE}; 2 use crate::std::cmp; 3 use crate::std::collections::VecDeque; 4 use crate::std::io::IoSlice; 5 use crate::std::mem::MaybeUninit; 6 use core::alloc::Allocator; 7 8 #[cfg(test)] 9 mod tests; 10 11 /// Copies the entire contents of a reader into a writer. 12 /// 13 /// This function will continuously read data from `reader` and then 14 /// write it into `writer` in a streaming fashion until `reader` 15 /// returns EOF. 16 /// 17 /// On success, the total number of bytes that were copied from 18 /// `reader` to `writer` is returned. 19 /// 20 /// If you want to copy the contents of one file to another and you’re 21 /// working with filesystem paths, see the [`fs::copy`] function. 22 /// 23 /// [`fs::copy`]: crate::std::fs::copy 24 /// 25 /// # Errors 26 /// 27 /// This function will return an error immediately if any call to [`read`] or 28 /// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are 29 /// handled by this function and the underlying operation is retried. 30 /// 31 /// [`read`]: Read::read 32 /// [`write`]: Write::write 33 /// [`ErrorKind::Interrupted`]: crate::std::io::ErrorKind::Interrupted 34 /// 35 /// # Examples 36 /// 37 /// ``` 38 /// use std::io; 39 /// 40 /// fn main() -> io::Result<()> { 41 /// let mut reader: &[u8] = b"hello"; 42 /// let mut writer: Vec<u8> = vec![]; 43 /// 44 /// io::copy(&mut reader, &mut writer)?; 45 /// 46 /// assert_eq!(&b"hello"[..], &writer[..]); 47 /// Ok(()) 48 /// } 49 /// ``` 50 /// 51 /// # Platform-specific behavior 52 /// 53 /// On Linux (including Android), this function uses `copy_file_range(2)`, 54 /// `sendfile(2)` or `splice(2)` syscalls to move data directly between file 55 /// descriptors if possible. 56 /// 57 /// Note that platform-specific behavior [may change in the future][changes]. 
///
/// [changes]: crate::std::io#platform-specific-behavior
pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
where
    R: Read,
    W: Write,
{
    // On Linux/Android, defer to the kernel-offload implementation which can
    // use copy_file_range(2)/sendfile(2)/splice(2) when both ends are suitable
    // file descriptors; every other platform uses the userspace loop below.
    cfg_if::cfg_if! {
        if #[cfg(any(target_os = "linux", target_os = "android"))] {
            crate::std::sys::kernel_copy::copy_spec(reader, writer)
        } else {
            generic_copy(reader, writer)
        }
    }
}

/// The userspace read-write-loop implementation of `io::copy` that is used when
/// OS-specific specializations for copy offloading are not available or not applicable.
///
/// Picks between two strategies: draining the reader's own internal buffer
/// (`BufferedReaderSpec::copy_to`) or filling the writer's buffer / a stack
/// buffer (`BufferedWriterSpec::copy_from`), based on which side advertises
/// the larger reusable buffer.
pub(crate) fn generic_copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64>
where
    R: Read,
    W: Write,
{
    // Each side reports how much internal buffer it can contribute; 0 means
    // "no reusable buffer" (the unspecialized default).
    let read_buf = BufferedReaderSpec::buffer_size(reader);
    let write_buf = BufferedWriterSpec::buffer_size(writer);

    // Use the reader-driven loop only when the reader's buffer is both large
    // enough to be worthwhile and at least as large as the writer's.
    if read_buf >= DEFAULT_BUF_SIZE && read_buf >= write_buf {
        return BufferedReaderSpec::copy_to(reader, writer);
    }

    BufferedWriterSpec::copy_from(writer, reader)
}

/// Specialization of the read-write loop that reuses the internal
/// buffer of a BufReader. If there's no buffer then the writer side
/// should be used instead.
94 trait BufferedReaderSpec { 95 fn buffer_size(&self) -> usize; 96 97 fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64>; 98 } 99 100 impl<T> BufferedReaderSpec for T 101 where 102 Self: Read, 103 T: ?Sized, 104 { 105 #[inline] 106 default fn buffer_size(&self) -> usize { 107 0 108 } 109 110 default fn copy_to(&mut self, _to: &mut (impl Write + ?Sized)) -> Result<u64> { 111 unreachable!("only called from specializations") 112 } 113 } 114 115 impl BufferedReaderSpec for &[u8] { 116 fn buffer_size(&self) -> usize { 117 // prefer this specialization since the source "buffer" is all we'll ever need, 118 // even if it's small 119 usize::MAX 120 } 121 122 fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> { 123 let len = self.len(); 124 to.write_all(self)?; 125 *self = &self[len..]; 126 Ok(len as u64) 127 } 128 } 129 130 impl<A: Allocator> BufferedReaderSpec for VecDeque<u8, A> { 131 fn buffer_size(&self) -> usize { 132 // prefer this specialization since the source "buffer" is all we'll ever need, 133 // even if it's small 134 usize::MAX 135 } 136 137 fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> { 138 let len = self.len(); 139 let (front, back) = self.as_slices(); 140 let bufs = &mut [IoSlice::new(front), IoSlice::new(back)]; 141 to.write_all_vectored(bufs)?; 142 self.clear(); 143 Ok(len as u64) 144 } 145 } 146 147 impl<I> BufferedReaderSpec for BufReader<I> 148 where 149 Self: Read, 150 I: ?Sized, 151 { 152 fn buffer_size(&self) -> usize { 153 self.capacity() 154 } 155 156 fn copy_to(&mut self, to: &mut (impl Write + ?Sized)) -> Result<u64> { 157 let mut len = 0; 158 159 loop { 160 // Hack: this relies on `impl Read for BufReader` always calling fill_buf 161 // if the buffer is empty, even for empty slices. 
162 // It can't be called directly here since specialization prevents us 163 // from adding I: Read 164 match self.read(&mut []) { 165 Ok(_) => {} 166 Err(e) if e.is_interrupted() => continue, 167 Err(e) => return Err(e), 168 } 169 let buf = self.buffer(); 170 if self.buffer().len() == 0 { 171 return Ok(len); 172 } 173 174 // In case the writer side is a BufWriter then its write_all 175 // implements an optimization that passes through large 176 // buffers to the underlying writer. That code path is #[cold] 177 // but we're still avoiding redundant memcopies when doing 178 // a copy between buffered inputs and outputs. 179 to.write_all(buf)?; 180 len += buf.len() as u64; 181 self.discard_buffer(); 182 } 183 } 184 } 185 186 /// Specialization of the read-write loop that either uses a stack buffer 187 /// or reuses the internal buffer of a BufWriter 188 trait BufferedWriterSpec: Write { 189 fn buffer_size(&self) -> usize; 190 191 fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64>; 192 } 193 194 impl<W: Write + ?Sized> BufferedWriterSpec for W { 195 #[inline] 196 default fn buffer_size(&self) -> usize { 197 0 198 } 199 200 default fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> { 201 stack_buffer_copy(reader, self) 202 } 203 } 204 205 impl<I: Write + ?Sized> BufferedWriterSpec for BufWriter<I> { 206 fn buffer_size(&self) -> usize { 207 self.capacity() 208 } 209 210 fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> { 211 if self.capacity() < DEFAULT_BUF_SIZE { 212 return stack_buffer_copy(reader, self); 213 } 214 215 let mut len = 0; 216 let mut init = 0; 217 218 loop { 219 let buf = self.buffer_mut(); 220 let mut read_buf: BorrowedBuf<'_> = buf.spare_capacity_mut().into(); 221 222 unsafe { 223 // SAFETY: init is either 0 or the init_len from the previous iteration. 
224 read_buf.set_init(init); 225 } 226 227 if read_buf.capacity() >= DEFAULT_BUF_SIZE { 228 let mut cursor = read_buf.unfilled(); 229 match reader.read_buf(cursor.reborrow()) { 230 Ok(()) => { 231 let bytes_read = cursor.written(); 232 233 if bytes_read == 0 { 234 return Ok(len); 235 } 236 237 init = read_buf.init_len() - bytes_read; 238 len += bytes_read as u64; 239 240 // SAFETY: BorrowedBuf guarantees all of its filled bytes are init 241 unsafe { buf.set_len(buf.len() + bytes_read) }; 242 243 // Read again if the buffer still has enough capacity, as BufWriter itself would do 244 // This will occur if the reader returns short reads 245 } 246 Err(ref e) if e.is_interrupted() => {} 247 Err(e) => return Err(e), 248 } 249 } else { 250 self.flush_buf()?; 251 init = 0; 252 } 253 } 254 } 255 } 256 257 impl<A: Allocator> BufferedWriterSpec for Vec<u8, A> { 258 fn buffer_size(&self) -> usize { 259 cmp::max(DEFAULT_BUF_SIZE, self.capacity() - self.len()) 260 } 261 262 fn copy_from<R: Read + ?Sized>(&mut self, reader: &mut R) -> Result<u64> { 263 let mut bytes = 0; 264 265 // avoid allocating before we have determined that there's anything to read 266 if self.capacity() == 0 { 267 bytes = stack_buffer_copy(&mut reader.take(DEFAULT_BUF_SIZE as u64), self)?; 268 if bytes == 0 { 269 return Ok(0); 270 } 271 } 272 273 loop { 274 self.reserve(DEFAULT_BUF_SIZE); 275 let mut buf: BorrowedBuf<'_> = self.spare_capacity_mut().into(); 276 match reader.read_buf(buf.unfilled()) { 277 Ok(()) => {} 278 Err(e) if e.is_interrupted() => continue, 279 Err(e) => return Err(e), 280 }; 281 282 let read = buf.filled().len(); 283 if read == 0 { 284 break; 285 } 286 287 // SAFETY: BorrowedBuf guarantees all of its filled bytes are init 288 // and the number of read bytes can't exceed the spare capacity since 289 // that's what the buffer is borrowing from. 
290 unsafe { self.set_len(self.len() + read) }; 291 bytes += read as u64; 292 } 293 294 Ok(bytes) 295 } 296 } 297 298 fn stack_buffer_copy<R: Read + ?Sized, W: Write + ?Sized>( 299 reader: &mut R, 300 writer: &mut W, 301 ) -> Result<u64> { 302 let buf: &mut [_] = &mut [MaybeUninit::uninit(); DEFAULT_BUF_SIZE]; 303 let mut buf: BorrowedBuf<'_> = buf.into(); 304 305 let mut len = 0; 306 307 loop { 308 match reader.read_buf(buf.unfilled()) { 309 Ok(()) => {} 310 Err(e) if e.is_interrupted() => continue, 311 Err(e) => return Err(e), 312 }; 313 314 if buf.filled().is_empty() { 315 break; 316 } 317 318 len += buf.filled().len() as u64; 319 writer.write_all(buf.filled())?; 320 buf.clear(); 321 } 322 323 Ok(len) 324 } 325