//! Types for creating ZIP archives

use std::borrow::Cow;

use crate::compression::CompressionMethod;
use crate::read::{central_header_to_zip_file, find_content, ZipArchive, ZipFile, ZipFileReader};
use crate::result::{ZipError, ZipResult};
use crate::spec;
use crate::types::{ffi, AtomicU64, DateTime, System, ZipFileData, DEFAULT_VERSION};
use byteorder::{LittleEndian, WriteBytesExt};
use crc32fast::Hasher;
use std::collections::HashMap;
use std::convert::TryInto;
use std::default::Default;
use std::io;
use std::io::prelude::*;
use std::io::{BufReader, SeekFrom};
use std::mem;

#[cfg(any(
    feature = "deflate",
    feature = "deflate-miniz",
    feature = "deflate-zlib"
))]
use flate2::write::DeflateEncoder;

#[cfg(feature = "bzip2")]
use bzip2::write::BzEncoder;

#[cfg(feature = "time")]
use time::OffsetDateTime;

#[cfg(feature = "zstd")]
use zstd::stream::write::Encoder as ZstdEncoder;

enum MaybeEncrypted<W> {
    Unencrypted(W),
    Encrypted(crate::zipcrypto::ZipCryptoWriter<W>),
}

impl<W: Write> Write for MaybeEncrypted<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match self {
            MaybeEncrypted::Unencrypted(w) => w.write(buf),
            MaybeEncrypted::Encrypted(w) => w.write(buf),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match self {
            MaybeEncrypted::Unencrypted(w) => w.flush(),
            MaybeEncrypted::Encrypted(w) => w.flush(),
        }
    }
}

enum GenericZipWriter<W: Write + Seek> {
    Closed,
    Storer(MaybeEncrypted<W>),
    #[cfg(any(
        feature = "deflate",
        feature = "deflate-miniz",
        feature = "deflate-zlib"
    ))]
    Deflater(DeflateEncoder<MaybeEncrypted<W>>),
    #[cfg(feature = "bzip2")]
    Bzip2(BzEncoder<MaybeEncrypted<W>>),
    #[cfg(feature = "zstd")]
    Zstd(ZstdEncoder<'static, MaybeEncrypted<W>>),
}

// Put the struct declaration in a private module to convince rustdoc to display ZipWriter nicely
pub(crate) mod zip_writer {
    use super::*;
    use std::collections::HashMap;

    /// ZIP archive generator
    ///
    /// Handles the bookkeeping involved in building an archive, and provides an
    /// API to edit its contents.
    ///
    /// ```
    /// # fn doit() -> zip_next::result::ZipResult<()>
    /// # {
    /// # use zip_next::ZipWriter;
    /// use std::io::Write;
    /// use zip_next::write::FileOptions;
    ///
    /// // We use a buffer here, though you'd normally use a `File`
    /// let mut buf = [0; 65536];
    /// let mut zip = ZipWriter::new(std::io::Cursor::new(&mut buf[..]));
    ///
    /// let options = FileOptions::default().compression_method(zip_next::CompressionMethod::Stored);
    /// zip.start_file("hello_world.txt", options)?;
    /// zip.write(b"Hello, World!")?;
    ///
    /// // Apply the changes you've made.
    /// // Dropping the `ZipWriter` will have the same effect, but may silently fail
    /// zip.finish()?;
    ///
    /// # Ok(())
    /// # }
    /// # doit().unwrap();
    /// ```
    pub struct ZipWriter<W: Write + Seek> {
        pub(super) inner: GenericZipWriter<W>,
        pub(super) files: Vec<ZipFileData>,
        pub(super) files_by_name: HashMap<String, usize>,
        pub(super) stats: ZipWriterStats,
        pub(super) writing_to_file: bool,
        pub(super) writing_raw: bool,
        pub(super) comment: Vec<u8>,
    }
}

use crate::result::ZipError::InvalidArchive;
use crate::write::GenericZipWriter::{Closed, Storer};
use crate::zipcrypto::ZipCryptoKeys;
pub use zip_writer::ZipWriter;

#[derive(Default)]
struct ZipWriterStats {
    hasher: Hasher,
    start: u64,
    bytes_written: u64,
}

struct ZipRawValues {
    crc32: u32,
    compressed_size: u64,
    uncompressed_size: u64,
}

/// Metadata for a file to be written
#[derive(Clone, Debug)]
pub struct FileOptions<'a> {
    pub(crate) compression_method: CompressionMethod,
    pub(crate) compression_level: Option<i32>,
    pub(crate) last_modified_time: DateTime,
    pub(crate) permissions: Option<u32>,
    pub(crate) large_file: bool,
    encrypt_with: Option<ZipCryptoKeys>,
    extra_data: Cow<'a, Vec<u8>>,
    central_extra_data: Cow<'a, Vec<u8>>,
    alignment: u16,
}

#[cfg(fuzzing)]
impl arbitrary::Arbitrary<'_> for FileOptions<'_> {
    fn arbitrary(u: &mut arbitrary::Unstructured) -> arbitrary::Result<Self> {
        let mut options = FileOptions {
            compression_method: CompressionMethod::arbitrary(u)?,
            compression_level: Option::<i32>::arbitrary(u)?,
            last_modified_time: DateTime::arbitrary(u)?,
            permissions: Option::<u32>::arbitrary(u)?,
            large_file: bool::arbitrary(u)?,
            encrypt_with: Option::<ZipCryptoKeys>::arbitrary(u)?,
            extra_data: Cow::Owned(Vec::with_capacity(u16::MAX as usize)),
            central_extra_data: Cow::Owned(Vec::with_capacity(u16::MAX as usize)),
            alignment: u16::arbitrary(u)?,
        };
        u.arbitrary_loop(Some(0), Some((u16::MAX / 4) as u32), |u| {
            options
                .add_extra_data(
                    u16::arbitrary(u)?,
                    &Vec::<u8>::arbitrary(u)?,
                    bool::arbitrary(u)?,
                )
                .map_err(|_| arbitrary::Error::IncorrectFormat)?;
            Ok(core::ops::ControlFlow::Continue(()))
        })?;
        Ok(options)
    }
}

impl<'a> FileOptions<'a> {
    /// Set the compression method for the new file
    ///
    /// The default is `CompressionMethod::Deflated`. If the deflate compression feature is
    /// disabled, `CompressionMethod::Stored` becomes the default.
    #[must_use]
    pub fn compression_method(mut self, method: CompressionMethod) -> FileOptions<'a> {
        self.compression_method = method;
        self
    }

    /// Set the compression level for the new file
    ///
    /// `None` value specifies default compression level.
    ///
    /// Range of values depends on compression method:
    /// * `Deflated`: 0 - 9. Default is 6
    /// * `Bzip2`: 0 - 9. Default is 6
    /// * `Zstd`: -7 - 22, with zero being mapped to default level. Default is 3
    /// * others: only `None` is allowed
    #[must_use]
    pub fn compression_level(mut self, level: Option<i32>) -> FileOptions<'a> {
        self.compression_level = level;
        self
    }

    /// Set the last modified time
    ///
    /// The default is the current timestamp if the 'time' feature is enabled, and 1980-01-01
    /// otherwise
    #[must_use]
    pub fn last_modified_time(mut self, mod_time: DateTime) -> FileOptions<'a> {
        self.last_modified_time = mod_time;
        self
    }

    /// Set the permissions for the new file.
    ///
    /// The format is represented with unix-style permissions.
    /// The default is `0o644`, which represents `rw-r--r--` for files,
    /// and `0o755`, which represents `rwxr-xr-x` for directories.
    ///
    /// This method only preserves the file permission bits (via a `& 0o777`) and discards
    /// higher file mode bits. So it cannot be used to denote an entry as a directory,
    /// symlink, or other special file type.
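    ///
    /// A minimal sketch (illustrative example, not from the original docs):
    /// ```
    /// use zip_next::write::FileOptions;
    ///
    /// // Only the lower nine permission bits are kept; file-type bits are discarded.
    /// let _options = FileOptions::default().unix_permissions(0o644);
    /// ```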
    #[must_use]
    pub fn unix_permissions(mut self, mode: u32) -> FileOptions<'a> {
        self.permissions = Some(mode & 0o777);
        self
    }

    /// Set whether the new file's compressed and uncompressed size is less than 4 GiB.
    ///
    /// If set to `false` and the file exceeds the limit, an I/O error is thrown and the file is
    /// aborted. If set to `true`, readers will require ZIP64 support and, if the file does not
    /// exceed the limit, 20 bytes are wasted. The default is `false`.
    #[must_use]
    pub fn large_file(mut self, large: bool) -> FileOptions<'a> {
        self.large_file = large;
        self
    }

    pub(crate) fn with_deprecated_encryption(mut self, password: &[u8]) -> FileOptions<'a> {
        self.encrypt_with = Some(ZipCryptoKeys::derive(password));
        self
    }

    /// Adds an extra data field.
    pub fn add_extra_data(
        &mut self,
        header_id: u16,
        data: &[u8],
        central_only: bool,
    ) -> ZipResult<()> {
        validate_extra_data(header_id, data)?;
        let len = data.len() + 4;
        if self.extra_data.len() + self.central_extra_data.len() + len > u16::MAX as usize {
            Err(InvalidArchive(
                "Extra data field would be longer than allowed",
            ))
        } else {
            let field = if central_only {
                &mut self.central_extra_data
            } else {
                &mut self.extra_data
            };
            let field = field.to_mut();
            field.write_u16::<LittleEndian>(header_id)?;
            field.write_u16::<LittleEndian>(data.len() as u16)?;
            field.write_all(data)?;
            Ok(())
        }
    }

    /// Removes the extra data fields.
    #[must_use]
    pub fn clear_extra_data(mut self) -> FileOptions<'a> {
        self.extra_data.to_mut().clear();
        self.central_extra_data.to_mut().clear();
        self
    }
}

impl<'a> Default for FileOptions<'a> {
    /// Construct a new FileOptions object
    fn default() -> Self {
        Self {
            #[cfg(any(
                feature = "deflate",
                feature = "deflate-miniz",
                feature = "deflate-zlib"
            ))]
            compression_method: CompressionMethod::Deflated,
            #[cfg(not(any(
                feature = "deflate",
                feature = "deflate-miniz",
                feature = "deflate-zlib"
            )))]
            compression_method: CompressionMethod::Stored,
            compression_level: None,
            #[cfg(feature = "time")]
            last_modified_time: OffsetDateTime::now_utc().try_into().unwrap_or_default(),
            #[cfg(not(feature = "time"))]
            last_modified_time: DateTime::default(),
            permissions: None,
            large_file: false,
            encrypt_with: None,
            extra_data: Cow::Owned(Vec::with_capacity(u16::MAX as usize)),
            central_extra_data: Cow::Owned(Vec::with_capacity(u16::MAX as usize)),
            alignment: 1,
        }
    }
}

impl<W: Write + Seek> Write for ZipWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        if !self.writing_to_file {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "No file has been started",
            ));
        }
        match self.inner.ref_mut() {
            Some(ref mut w) => {
                let write_result = w.write(buf);
                if let Ok(count) = write_result {
                    self.stats.update(&buf[0..count]);
                    if self.stats.bytes_written > spec::ZIP64_BYTES_THR
                        && !self.files.last_mut().unwrap().large_file
                    {
                        self.abort_file().unwrap();
                        return Err(io::Error::new(
                            io::ErrorKind::Other,
                            "Large file option has not been set",
                        ));
                    }
                }
                write_result
            }
            None => Err(io::Error::new(
                io::ErrorKind::BrokenPipe,
                "write(): ZipWriter was already closed",
            )),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match self.inner.ref_mut() {
            Some(ref mut w) => w.flush(),
            None => Err(io::Error::new(
                io::ErrorKind::BrokenPipe,
                "flush(): ZipWriter was already closed",
            )),
        }
    }
}

impl ZipWriterStats {
    fn update(&mut self, buf: &[u8]) {
        self.hasher.update(buf);
        self.bytes_written += buf.len() as u64;
    }
}

impl<A: Read + Write + Seek> ZipWriter<A> {
    /// Initializes the archive from an existing ZIP archive, making it ready for append.
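    ///
    /// A minimal usage sketch, assuming an existing, writable `archive.zip` (the path and file
    /// name are hypothetical):
    /// ```no_run
    /// # fn doit() -> zip_next::result::ZipResult<()> {
    /// use std::fs::OpenOptions;
    /// use std::io::Write;
    /// use zip_next::{write::FileOptions, ZipWriter};
    ///
    /// // The archive must be opened for reading (to parse the central directory)
    /// // as well as writing (to append new entries).
    /// let file = OpenOptions::new().read(true).write(true).open("archive.zip")?;
    /// let mut zip = ZipWriter::new_append(file)?;
    /// zip.start_file("appended.txt", FileOptions::default())?;
    /// zip.write_all(b"appended contents")?;
    /// zip.finish()?;
    /// # Ok(()) }
    /// ```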
    pub fn new_append(mut readwriter: A) -> ZipResult<ZipWriter<A>> {
        let (footer, cde_start_pos) = spec::CentralDirectoryEnd::find_and_parse(&mut readwriter)?;

        if footer.disk_number != footer.disk_with_central_directory {
            return Err(ZipError::UnsupportedArchive(
                "Support for multi-disk files is not implemented",
            ));
        }

        let (archive_offset, directory_start, number_of_files) =
            ZipArchive::get_directory_counts(&mut readwriter, &footer, cde_start_pos)?;

        if readwriter.seek(SeekFrom::Start(directory_start)).is_err() {
            return Err(InvalidArchive(
                "Could not seek to start of central directory",
            ));
        }

        let files = (0..number_of_files)
            .map(|_| central_header_to_zip_file(&mut readwriter, archive_offset))
            .collect::<Result<Vec<_>, _>>()?;

        let mut files_by_name = HashMap::new();
        for (index, file) in files.iter().enumerate() {
            files_by_name.insert(file.file_name.to_owned(), index);
        }

        let _ = readwriter.seek(SeekFrom::Start(directory_start)); // seek directory_start to overwrite it

        Ok(ZipWriter {
            inner: Storer(MaybeEncrypted::Unencrypted(readwriter)),
            files,
            files_by_name,
            stats: Default::default(),
            writing_to_file: false,
            comment: footer.zip_file_comment,
            writing_raw: true, // avoid recomputing the last file's header
        })
    }
}

impl<A: Read + Write + Seek> ZipWriter<A> {
    /// Adds another copy of a file already in this archive. This will produce a larger but more
    /// widely-compatible archive compared to [shallow_copy_file]. Does not copy alignment.
    pub fn deep_copy_file(&mut self, src_name: &str, dest_name: &str) -> ZipResult<()> {
        self.finish_file()?;
        let write_position = self.inner.get_plain().stream_position()?;
        let src_index = self.index_by_name(src_name)?;
        let src_data = &self.files[src_index];
        let data_start = src_data.data_start.load();
        let compressed_size = src_data.compressed_size;
        debug_assert!(compressed_size <= write_position - data_start);
        let uncompressed_size = src_data.uncompressed_size;
        let mut options = FileOptions {
            compression_method: src_data.compression_method,
            compression_level: src_data.compression_level,
            last_modified_time: src_data.last_modified_time,
            permissions: src_data.unix_mode(),
            large_file: src_data.large_file,
            encrypt_with: None,
            extra_data: Cow::Borrowed(&src_data.extra_field),
            central_extra_data: Cow::Borrowed(&src_data.central_extra_field),
            alignment: 1,
        };
        if let Some(perms) = src_data.unix_mode() {
            options = options.unix_permissions(perms);
        }
        Self::normalize_options(&mut options);
        let raw_values = ZipRawValues {
            crc32: src_data.crc32,
            compressed_size,
            uncompressed_size,
        };
        let mut reader = BufReader::new(ZipFileReader::Raw(find_content(
            src_data,
            self.inner.get_plain(),
        )?));
        let mut copy = Vec::with_capacity(compressed_size as usize);
        reader.read_to_end(&mut copy)?;
        drop(reader);
        self.inner
            .get_plain()
            .seek(SeekFrom::Start(write_position))?;
        self.start_entry(dest_name, options, Some(raw_values))?;
        self.writing_to_file = true;
        self.writing_raw = true;
        if let Err(e) = self.write_all(&copy) {
            self.abort_file().unwrap();
            return Err(e.into());
        }
        self.finish_file()
    }
}

impl<W: Write + Seek> ZipWriter<W> {
    /// Initializes the archive.
    ///
    /// Before writing to this object, the [`ZipWriter::start_file`] function should be called.
    /// After a successful write, the file remains open for writing. After a failed write, call
    /// [`ZipWriter::is_writing_file`] to determine if the file remains open.
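    ///
    /// A minimal sketch of recovering from a failed write (illustrative only):
    /// ```
    /// # fn doit() -> zip_next::result::ZipResult<()> {
    /// use std::io::Write;
    /// use zip_next::{write::FileOptions, ZipWriter};
    ///
    /// let mut zip = ZipWriter::new(std::io::Cursor::new(Vec::new()));
    /// zip.start_file("entry.txt", FileOptions::default())?;
    /// if zip.write_all(b"contents").is_err() {
    ///     // After a failed write, the entry may or may not still be open.
    ///     if zip.is_writing_file() {
    ///         zip.abort_file()?;
    ///     }
    /// }
    /// zip.finish()?;
    /// # Ok(()) }
    /// # doit().unwrap();
    /// ```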
    pub fn new(inner: W) -> ZipWriter<W> {
        ZipWriter {
            inner: Storer(MaybeEncrypted::Unencrypted(inner)),
            files: Vec::new(),
            files_by_name: HashMap::new(),
            stats: Default::default(),
            writing_to_file: false,
            writing_raw: false,
            comment: Vec::new(),
        }
    }

    /// Returns true if a file is currently open for writing.
    pub fn is_writing_file(&self) -> bool {
        self.writing_to_file && !self.inner.is_closed()
    }

    /// Set ZIP archive comment.
    pub fn set_comment<S>(&mut self, comment: S)
    where
        S: Into<String>,
    {
        self.set_raw_comment(comment.into().into())
    }

    /// Set ZIP archive comment.
    ///
    /// This sets the raw bytes of the comment. The comment
    /// is typically expected to be encoded in UTF-8
    pub fn set_raw_comment(&mut self, comment: Vec<u8>) {
        self.comment = comment;
    }

    /// Start a new file with the requested options.
    fn start_entry<S>(
        &mut self,
        name: S,
        options: FileOptions,
        raw_values: Option<ZipRawValues>,
    ) -> ZipResult<()>
    where
        S: Into<String>,
    {
        self.finish_file()?;

        let raw_values = raw_values.unwrap_or(ZipRawValues {
            crc32: 0,
            compressed_size: 0,
            uncompressed_size: 0,
        });

        {
            let header_start = self.inner.get_plain().stream_position()?;

            let name = name.into();
            let permissions = options.permissions.unwrap_or(0o100644);
            let file = ZipFileData {
                system: System::Unix,
                version_made_by: DEFAULT_VERSION,
                encrypted: options.encrypt_with.is_some(),
                using_data_descriptor: false,
                compression_method: options.compression_method,
                compression_level: options.compression_level,
                last_modified_time: options.last_modified_time,
                crc32: raw_values.crc32,
                compressed_size: raw_values.compressed_size,
                uncompressed_size: raw_values.uncompressed_size,
                file_name: name,
                file_name_raw: Vec::new(), // Never used for saving
                extra_field: options.extra_data.to_vec(),
                central_extra_field: options.central_extra_data.to_vec(),
                file_comment: String::new(),
                header_start,
                data_start: AtomicU64::new(0),
                central_header_start: 0,
                external_attributes: permissions << 16,
                large_file: options.large_file,
                aes_mode: None,
            };
            let index = self.insert_file_data(file)?;
            let file = &mut self.files[index];
            let writer = self.inner.get_plain();
            writer.write_u32::<LittleEndian>(spec::LOCAL_FILE_HEADER_SIGNATURE)?;
            // version needed to extract
            writer.write_u16::<LittleEndian>(file.version_needed())?;
            // general purpose bit flag
            let flag = if !file.file_name.is_ascii() {
                1u16 << 11
            } else {
                0
            } | if file.encrypted { 1u16 << 0 } else { 0 };
            writer.write_u16::<LittleEndian>(flag)?;
            // Compression method
            #[allow(deprecated)]
            writer.write_u16::<LittleEndian>(file.compression_method.to_u16())?;
            // last mod file time and last mod file date
            writer.write_u16::<LittleEndian>(file.last_modified_time.timepart())?;
            writer.write_u16::<LittleEndian>(file.last_modified_time.datepart())?;
            // crc-32
            writer.write_u32::<LittleEndian>(file.crc32)?;
            // compressed size and uncompressed size
            if file.large_file {
                writer.write_u32::<LittleEndian>(spec::ZIP64_BYTES_THR as u32)?;
                writer.write_u32::<LittleEndian>(spec::ZIP64_BYTES_THR as u32)?;
            } else {
                writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
                writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
            }
            // file name length
            writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
            // extra field length
            let mut extra_field_length = file.extra_field.len();
            if file.large_file {
                extra_field_length += 20;
            }
            if extra_field_length + file.central_extra_field.len() > u16::MAX as usize {
                let _ = self.abort_file();
                return Err(InvalidArchive("Extra data field is too large"));
            }
            let extra_field_length = extra_field_length as u16;
            writer.write_u16::<LittleEndian>(extra_field_length)?;
            // file name
            writer.write_all(file.file_name.as_bytes())?;
            // zip64 extra field
            if file.large_file {
                write_local_zip64_extra_field(writer, file)?;
            }
            writer.write_all(&file.extra_field)?;
            let mut header_end = writer.stream_position()?;
            if options.alignment > 1 {
                let align = options.alignment as u64;
                if header_end % align != 0 {
                    let pad_length = (align - (header_end + 4) % align) % align;
                    if pad_length + extra_field_length as u64 > u16::MAX as u64 {
                        let _ = self.abort_file();
                        return Err(InvalidArchive(
                            "Extra data field would be larger than allowed after aligning",
                        ));
                    }
                    let pad = vec![0; pad_length as usize];
                    writer.write_all(b"za").map_err(ZipError::from)?; // 0x617a
                    writer
                        .write_u16::<LittleEndian>(pad.len() as u16)
                        .map_err(ZipError::from)?;
                    writer.write_all(&pad).map_err(ZipError::from)?;
                    header_end = writer.stream_position()?;

                    // Update extra field length in local file header.
                    writer.seek(SeekFrom::Start(file.header_start + 28))?;
                    writer.write_u16::<LittleEndian>(pad_length as u16 + extra_field_length)?;
                    writer.seek(SeekFrom::Start(header_end))?;
                    debug_assert_eq!(header_end % align, 0);
                }
            }
            if let Some(keys) = options.encrypt_with {
                let mut zipwriter = crate::zipcrypto::ZipCryptoWriter {
                    writer: mem::replace(&mut self.inner, Closed).unwrap(),
                    buffer: vec![],
                    keys,
                };
                let crypto_header = [0u8; 12];

                zipwriter.write_all(&crypto_header)?;
                header_end = zipwriter.writer.stream_position()?;
                self.inner = Storer(MaybeEncrypted::Encrypted(zipwriter));
            }
            self.stats.start = header_end;
            *file.data_start.get_mut() = header_end;
            self.writing_to_file = true;
            self.stats.bytes_written = 0;
            self.stats.hasher = Hasher::new();
        }
        Ok(())
    }

    fn insert_file_data(&mut self, file: ZipFileData) -> ZipResult<usize> {
        let name = &file.file_name;
        if self.files_by_name.contains_key(name) {
            return Err(InvalidArchive("Duplicate filename"));
        }
        let name = name.to_owned();
        self.files.push(file);
        let index = self.files.len() - 1;
        self.files_by_name.insert(name, index);
        Ok(index)
    }

    fn finish_file(&mut self) -> ZipResult<()> {
        if !self.writing_to_file {
            return Ok(());
        }
        let make_plain_writer = self
            .inner
            .prepare_next_writer(CompressionMethod::Stored, None)?;
        self.inner.switch_to(make_plain_writer)?;
        self.switch_to_non_encrypting_writer()?;
        let writer = self.inner.get_plain();

        if !self.writing_raw {
            let file = match self.files.last_mut() {
                None => return Ok(()),
                Some(f) => f,
            };
            file.crc32 = self.stats.hasher.clone().finalize();
            file.uncompressed_size = self.stats.bytes_written;

            let file_end = writer.stream_position()?;
            debug_assert!(file_end >= self.stats.start);
            file.compressed_size = file_end - self.stats.start;

            update_local_file_header(writer, file)?;
            writer.seek(SeekFrom::Start(file_end))?;
        }

        self.writing_to_file = false;
        Ok(())
    }

    fn switch_to_non_encrypting_writer(&mut self) -> Result<(), ZipError> {
        match mem::replace(&mut self.inner, Closed) {
            Storer(MaybeEncrypted::Encrypted(writer)) => {
                let crc32 = self.stats.hasher.clone().finalize();
                self.inner = Storer(MaybeEncrypted::Unencrypted(writer.finish(crc32)?))
            }
            Storer(MaybeEncrypted::Unencrypted(w)) => {
                self.inner = Storer(MaybeEncrypted::Unencrypted(w))
            }
            _ => unreachable!(),
        }
        Ok(())
    }

    /// Removes the file currently being written from the archive if there is one, or else removes
    /// the file most recently written.
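    ///
    /// A minimal sketch (illustrative example): start an entry, then abandon it so it never
    /// appears in the finished archive.
    /// ```
    /// # fn doit() -> zip_next::result::ZipResult<()> {
    /// use zip_next::{write::FileOptions, ZipWriter};
    ///
    /// let mut zip = ZipWriter::new(std::io::Cursor::new(Vec::new()));
    /// zip.start_file("discarded.txt", FileOptions::default())?;
    /// zip.abort_file()?;
    /// zip.finish()?;
    /// # Ok(()) }
    /// # doit().unwrap();
    /// ```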
    pub fn abort_file(&mut self) -> ZipResult<()> {
        let last_file = self.files.pop().ok_or(ZipError::FileNotFound)?;
        self.files_by_name.remove(&last_file.file_name);
        let make_plain_writer = self
            .inner
            .prepare_next_writer(CompressionMethod::Stored, None)?;
        self.inner.switch_to(make_plain_writer)?;
        self.inner
            .get_plain()
            .seek(SeekFrom::Start(last_file.header_start))?;
        self.writing_to_file = false;
        Ok(())
    }

    /// Create a file in the archive and start writing its contents. The file must not have the
    /// same name as a file already in the archive.
    ///
    /// The data should be written using the [`Write`] implementation on this [`ZipWriter`]
    pub fn start_file<S>(&mut self, name: S, mut options: FileOptions) -> ZipResult<()>
    where
        S: Into<String>,
    {
        Self::normalize_options(&mut options);
        let make_new_self = self
            .inner
            .prepare_next_writer(options.compression_method, options.compression_level)?;
        self.start_entry(name, options, None)?;
        if let Err(e) = self.inner.switch_to(make_new_self) {
            self.abort_file().unwrap();
            return Err(e);
        }
        self.writing_raw = false;
        Ok(())
    }

    fn normalize_options(options: &mut FileOptions) {
        if options.permissions.is_none() {
            options.permissions = Some(0o644);
        }
        if !options.last_modified_time.is_valid() {
            options.last_modified_time = FileOptions::default().last_modified_time;
        }
        *options.permissions.as_mut().unwrap() |= ffi::S_IFREG;
    }

    /// Starts a file, taking a Path as argument.
    ///
    /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal'
    /// Components, such as a starting '/' or '..' and '.'.
    #[deprecated(
        since = "0.5.7",
        note = "by stripping `..`s from the path, the meaning of paths can change. Use `start_file` instead."
    )]
    pub fn start_file_from_path(
        &mut self,
        path: &std::path::Path,
        options: FileOptions,
    ) -> ZipResult<()> {
        self.start_file(path_to_string(path), options)
    }

    /// Add a new file using the already compressed data from a ZIP file being read, renaming it
    /// in the process; this allows faster copies of the `ZipFile` since there is no need to
    /// decompress and compress it again. Any `ZipFile` metadata is copied and not checked, for
    /// example the file CRC.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::{Read, Seek, Write};
    /// use zip_next::{ZipArchive, ZipWriter};
    ///
    /// fn copy_rename<R, W>(
    ///     src: &mut ZipArchive<R>,
    ///     dst: &mut ZipWriter<W>,
    /// ) -> zip_next::result::ZipResult<()>
    /// where
    ///     R: Read + Seek,
    ///     W: Write + Seek,
    /// {
    ///     // Retrieve file entry by name
    ///     let file = src.by_name("src_file.txt")?;
    ///
    ///     // Copy and rename the previously obtained file entry to the destination zip archive
    ///     dst.raw_copy_file_rename(file, "new_name.txt")?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn raw_copy_file_rename<S>(&mut self, mut file: ZipFile, name: S) -> ZipResult<()>
    where
        S: Into<String>,
    {
        let mut options = FileOptions::default()
            .large_file(file.compressed_size().max(file.size()) > spec::ZIP64_BYTES_THR)
            .last_modified_time(file.last_modified())
            .compression_method(file.compression());
        if let Some(perms) = file.unix_mode() {
            options = options.unix_permissions(perms);
        }
        Self::normalize_options(&mut options);

        let raw_values = ZipRawValues {
            crc32: file.crc32(),
            compressed_size: file.compressed_size(),
            uncompressed_size: file.size(),
        };

        self.start_entry(name, options, Some(raw_values))?;
        self.writing_to_file = true;
        self.writing_raw = true;

        io::copy(file.get_raw_reader(), self)?;

        Ok(())
    }

    /// Add a new file using the already compressed data from a ZIP file being read; this allows
    /// faster copies of the `ZipFile` since there is no need to decompress and compress it again.
    /// Any `ZipFile` metadata is copied and not checked, for example the file CRC.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::{Read, Seek, Write};
    /// use zip_next::{ZipArchive, ZipWriter};
    ///
    /// fn copy<R, W>(src: &mut ZipArchive<R>, dst: &mut ZipWriter<W>) -> zip_next::result::ZipResult<()>
    /// where
    ///     R: Read + Seek,
    ///     W: Write + Seek,
    /// {
    ///     // Retrieve file entry by name
    ///     let file = src.by_name("src_file.txt")?;
    ///
    ///     // Copy the previously obtained file entry to the destination zip archive
    ///     dst.raw_copy_file(file)?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn raw_copy_file(&mut self, file: ZipFile) -> ZipResult<()> {
        let name = file.name().to_owned();
        self.raw_copy_file_rename(file, name)
    }

    /// Add a directory entry.
    ///
    /// As directories have no content, you must not call [`ZipWriter::write`] before adding a new file.
    pub fn add_directory<S>(&mut self, name: S, mut options: FileOptions) -> ZipResult<()>
    where
        S: Into<String>,
    {
        if options.permissions.is_none() {
            options.permissions = Some(0o755);
        }
        *options.permissions.as_mut().unwrap() |= 0o40000;
        options.compression_method = CompressionMethod::Stored;

        let name_as_string = name.into();
        // Append a slash to the filename if it does not end with it.
        let name_with_slash = match name_as_string.chars().last() {
            Some('/') | Some('\\') => name_as_string,
            _ => name_as_string + "/",
        };

        self.start_entry(name_with_slash, options, None)?;
        self.writing_to_file = false;
        self.switch_to_non_encrypting_writer()?;
        Ok(())
    }

    /// Add a directory entry, taking a Path as argument.
    ///
    /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal'
    /// Components, such as a starting '/' or '..' and '.'.
    #[deprecated(
        since = "0.5.7",
        note = "by stripping `..`s from the path, the meaning of paths can change. Use `add_directory` instead."
    )]
    pub fn add_directory_from_path(
        &mut self,
        path: &std::path::Path,
        options: FileOptions,
    ) -> ZipResult<()> {
        self.add_directory(path_to_string(path), options)
    }

    /// Finish the last file and write all other zip-structures
    ///
    /// This will return the writer, but one should normally not append any data to the end of the file.
    /// Note that the zipfile will also be finished on drop.
    pub fn finish(&mut self) -> ZipResult<W> {
        self.finalize()?;
        let inner = mem::replace(&mut self.inner, Closed);
        Ok(inner.unwrap())
    }

    /// Add a symlink entry.
    ///
    /// The zip archive will contain an entry for path `name` which is a symlink to `target`.
    ///
    /// No validation or normalization of the paths is performed. For best results,
    /// callers should normalize `\` to `/` and ensure symlinks are relative to other
    /// paths within the zip archive.
    ///
    /// WARNING: not all zip implementations preserve symlinks on extract. Some zip
    /// implementations may materialize a symlink as a regular file, possibly with the
    /// content incorrectly set to the symlink target. For maximum portability, consider
    /// storing a regular file instead.
    pub fn add_symlink<N, T>(
        &mut self,
        name: N,
        target: T,
        mut options: FileOptions,
    ) -> ZipResult<()>
    where
        N: Into<String>,
        T: Into<String>,
    {
        if options.permissions.is_none() {
            options.permissions = Some(0o777);
        }
        *options.permissions.as_mut().unwrap() |= 0o120000;
        // The symlink target is stored as file content. And compressing the target path
        // likely wastes space. So always store.
        options.compression_method = CompressionMethod::Stored;

        self.start_entry(name, options, None)?;
        self.writing_to_file = true;
        if let Err(e) = self.write_all(target.into().as_bytes()) {
            self.abort_file().unwrap();
            return Err(e.into());
        }
        self.finish_file()?;

        Ok(())
    }

    fn finalize(&mut self) -> ZipResult<()> {
        self.finish_file()?;

        {
            let writer = self.inner.get_plain();

            let central_start = writer.stream_position()?;
            for file in self.files.iter() {
                write_central_directory_header(writer, file)?;
            }
            let central_size = writer.stream_position()? - central_start;

            if self.files.len() > spec::ZIP64_ENTRY_THR
                || central_size.max(central_start) > spec::ZIP64_BYTES_THR
            {
                let zip64_footer = spec::Zip64CentralDirectoryEnd {
                    version_made_by: DEFAULT_VERSION as u16,
                    version_needed_to_extract: DEFAULT_VERSION as u16,
                    disk_number: 0,
                    disk_with_central_directory: 0,
                    number_of_files_on_this_disk: self.files.len() as u64,
                    number_of_files: self.files.len() as u64,
                    central_directory_size: central_size,
                    central_directory_offset: central_start,
                };

                zip64_footer.write(writer)?;

                let zip64_footer = spec::Zip64CentralDirectoryEndLocator {
                    disk_with_central_directory: 0,
                    end_of_central_directory_offset: central_start + central_size,
                    number_of_disks: 1,
                };

                zip64_footer.write(writer)?;
            }

            let number_of_files = self.files.len().min(spec::ZIP64_ENTRY_THR) as u16;
            let footer = spec::CentralDirectoryEnd {
                disk_number: 0,
                disk_with_central_directory: 0,
                zip_file_comment: self.comment.clone(),
                number_of_files_on_this_disk: number_of_files,
                number_of_files,
                central_directory_size: central_size.min(spec::ZIP64_BYTES_THR) as u32,
                central_directory_offset: central_start.min(spec::ZIP64_BYTES_THR) as u32,
            };

            footer.write(writer)?;
        }

        Ok(())
    }

    fn index_by_name(&self, name: &str) -> ZipResult<usize> {
        Ok(*self.files_by_name.get(name).ok_or(ZipError::FileNotFound)?)
    }

    /// Adds another entry to the central directory referring to the same content as an existing
    /// entry.
    /// The file's local-file header will still refer to it by its original name, so
    /// unzipping the file will technically be unspecified behavior. [ZipArchive] ignores the
    /// filename in the local-file header and treats the central directory as authoritative. However,
    /// some other software (e.g. Minecraft) will refuse to extract a file copied this way.
    pub fn shallow_copy_file(&mut self, src_name: &str, dest_name: &str) -> ZipResult<()> {
        self.finish_file()?;
        let src_index = self.index_by_name(src_name)?;
        let mut dest_data = self.files[src_index].to_owned();
        dest_data.file_name = dest_name.into();
        self.insert_file_data(dest_data)?;
        Ok(())
    }
}

impl<W: Write + Seek> Drop for ZipWriter<W> {
    fn drop(&mut self) {
        if !self.inner.is_closed() {
            if let Err(e) = self.finalize() {
                let _ = write!(io::stderr(), "ZipWriter drop failed: {:?}", e);
            }
        }
    }
}

type SwitchWriterFunction<W> = Box<dyn FnOnce(MaybeEncrypted<W>) -> GenericZipWriter<W>>;

impl<W: Write + Seek> GenericZipWriter<W> {
    fn prepare_next_writer(
        &self,
        compression: CompressionMethod,
        compression_level: Option<i32>,
    ) -> ZipResult<SwitchWriterFunction<W>> {
        if let Closed = self {
            return Err(
                io::Error::new(io::ErrorKind::BrokenPipe, "ZipWriter was already closed").into(),
            );
        }

        {
            #[allow(deprecated)]
            match compression {
                CompressionMethod::Stored => {
                    if compression_level.is_some() {
                        Err(ZipError::UnsupportedArchive(
                            "Unsupported compression level",
                        ))
                    } else {
                        Ok(Box::new(|bare| Storer(bare)))
                    }
                }
                #[cfg(any(
                    feature = "deflate",
                    feature = "deflate-miniz",
                    feature = "deflate-zlib"
                ))]
                CompressionMethod::Deflated => {
                    let level = clamp_opt(
                        compression_level.unwrap_or(flate2::Compression::default().level() as i32),
                        deflate_compression_level_range(),
                    )
                    .ok_or(ZipError::UnsupportedArchive(
                        "Unsupported compression level",
                    ))? as u32;
                    Ok(Box::new(move |bare| {
                        GenericZipWriter::Deflater(DeflateEncoder::new(
                            bare,
                            flate2::Compression::new(level),
                        ))
                    }))
                }
                #[cfg(feature = "bzip2")]
                CompressionMethod::Bzip2 => {
                    let level = clamp_opt(
                        compression_level.unwrap_or(bzip2::Compression::default().level() as i32),
                        bzip2_compression_level_range(),
                    )
                    .ok_or(ZipError::UnsupportedArchive(
                        "Unsupported compression level",
                    ))? as u32;
                    Ok(Box::new(move |bare| {
                        GenericZipWriter::Bzip2(BzEncoder::new(
                            bare,
                            bzip2::Compression::new(level),
                        ))
                    }))
                }
                CompressionMethod::AES => Err(ZipError::UnsupportedArchive(
                    "AES compression is not supported for writing",
                )),
                #[cfg(feature = "zstd")]
                CompressionMethod::Zstd => {
                    let level = clamp_opt(
                        compression_level.unwrap_or(zstd::DEFAULT_COMPRESSION_LEVEL),
                        zstd::compression_level_range(),
                    )
                    .ok_or(ZipError::UnsupportedArchive(
                        "Unsupported compression level",
                    ))?;
                    Ok(Box::new(move |bare| {
                        GenericZipWriter::Zstd(ZstdEncoder::new(bare, level).unwrap())
                    }))
                }
                CompressionMethod::Unsupported(..) => {
                    Err(ZipError::UnsupportedArchive("Unsupported compression"))
                }
            }
        }
    }

    fn switch_to(&mut self, make_new_self: SwitchWriterFunction<W>) -> ZipResult<()> {
        let bare = match mem::replace(self, Closed) {
            Storer(w) => w,
            #[cfg(any(
                feature = "deflate",
                feature = "deflate-miniz",
                feature = "deflate-zlib"
            ))]
            GenericZipWriter::Deflater(w) => w.finish()?,
            #[cfg(feature = "bzip2")]
            GenericZipWriter::Bzip2(w) => w.finish()?,
            #[cfg(feature = "zstd")]
            GenericZipWriter::Zstd(w) => w.finish()?,
            Closed => {
                return Err(io::Error::new(
                    io::ErrorKind::BrokenPipe,
                    "ZipWriter was already closed",
                )
                .into());
            }
        };
        *self = (make_new_self)(bare);
        Ok(())
    }

    fn ref_mut(&mut self) -> Option<&mut dyn Write> {
        match *self {
            Storer(ref mut w) => Some(w as &mut dyn Write),
            #[cfg(any(
                feature = "deflate",
                feature = "deflate-miniz",
                feature = "deflate-zlib"
            ))]
            GenericZipWriter::Deflater(ref mut w) => Some(w as &mut dyn Write),
            #[cfg(feature = "bzip2")]
            GenericZipWriter::Bzip2(ref mut w) => Some(w as &mut dyn Write),
            #[cfg(feature = "zstd")]
            GenericZipWriter::Zstd(ref mut w) => Some(w as &mut dyn Write),
            Closed => None,
        }
    }

    fn is_closed(&self) -> bool {
        matches!(*self, GenericZipWriter::Closed)
    }

    fn get_plain(&mut self) -> &mut W {
        match *self {
            Storer(MaybeEncrypted::Unencrypted(ref mut w)) => w,
            _ => panic!("Should have switched to stored and unencrypted beforehand"),
        }
    }

    fn unwrap(self) -> W {
        match self {
            Storer(MaybeEncrypted::Unencrypted(w)) => w,
            _ => panic!("Should have switched to stored and unencrypted beforehand"),
        }
    }
}

#[cfg(any(
    feature = "deflate",
    feature = "deflate-miniz",
    feature = "deflate-zlib"
))]
fn deflate_compression_level_range() -> std::ops::RangeInclusive<i32> {
    let min = flate2::Compression::none().level() as i32;
    let max = flate2::Compression::best().level() as i32;
    min..=max
}

#[cfg(feature = "bzip2")]
fn bzip2_compression_level_range() -> std::ops::RangeInclusive<i32> {
    let min = bzip2::Compression::fast().level() as i32;
    let max = bzip2::Compression::best().level() as i32;
    min..=max
}

#[cfg(any(
    feature = "deflate",
    feature = "deflate-miniz",
    feature = "deflate-zlib",
    feature = "bzip2",
    feature = "zstd"
))]
fn clamp_opt<T: PartialOrd>(value: T, range: std::ops::RangeInclusive<T>) -> Option<T> {
    if range.contains(&value) {
        Some(value)
    } else {
        None
    }
}

fn update_local_file_header<T: Write + Seek>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    const CRC32_OFFSET: u64 = 14;
    writer.seek(SeekFrom::Start(file.header_start + CRC32_OFFSET))?;
    writer.write_u32::<LittleEndian>(file.crc32)?;
    if file.large_file {
        update_local_zip64_extra_field(writer, file)?;
    } else {
        // check compressed size as well as it can also be slightly larger than uncompressed size
        if file.compressed_size > spec::ZIP64_BYTES_THR {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                "Large file option has not been set",
            )));
        }
        writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
        // uncompressed size is already checked on write to catch it as soon as possible
        writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
    }
    Ok(())
}

fn write_central_directory_header<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    // buffer zip64 extra field to determine its variable length
    let mut zip64_extra_field = [0; 28];
    let zip64_extra_field_length =
        write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?;

    // central file header signature
    writer.write_u32::<LittleEndian>(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?;
    // version made by
    let version_made_by = (file.system as u16) << 8 | (file.version_made_by as u16);
    writer.write_u16::<LittleEndian>(version_made_by)?;
    // version needed to extract
    writer.write_u16::<LittleEndian>(file.version_needed())?;
    // general purpose bit flag
    let flag = if !file.file_name.is_ascii() {
        1u16 << 11
    } else {
        0
    } | if file.encrypted { 1u16 << 0 } else { 0 };
    writer.write_u16::<LittleEndian>(flag)?;
    // compression method
    #[allow(deprecated)]
    writer.write_u16::<LittleEndian>(file.compression_method.to_u16())?;
    // last mod file time + date
    writer.write_u16::<LittleEndian>(file.last_modified_time.timepart())?;
    writer.write_u16::<LittleEndian>(file.last_modified_time.datepart())?;
    // crc-32
    writer.write_u32::<LittleEndian>(file.crc32)?;
    // compressed size
    writer.write_u32::<LittleEndian>(file.compressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
    // uncompressed size
    writer.write_u32::<LittleEndian>(file.uncompressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
    // file name length
    writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
    // extra field length
    writer.write_u16::<LittleEndian>(
        zip64_extra_field_length
            + file.extra_field.len() as u16
            + file.central_extra_field.len() as u16,
    )?;
    // file comment length
    writer.write_u16::<LittleEndian>(0)?;
    // disk number start
    writer.write_u16::<LittleEndian>(0)?;
    // internal file attributes
    writer.write_u16::<LittleEndian>(0)?;
    // external file attributes
    writer.write_u32::<LittleEndian>(file.external_attributes)?;
    // relative offset of local header
    writer.write_u32::<LittleEndian>(file.header_start.min(spec::ZIP64_BYTES_THR) as u32)?;
    // file name
    writer.write_all(file.file_name.as_bytes())?;
    // zip64 extra field
    writer.write_all(&zip64_extra_field[..zip64_extra_field_length as usize])?;
    // extra field
    writer.write_all(&file.extra_field)?;
    writer.write_all(&file.central_extra_field)?;
    // file comment

    Ok(())
}

fn validate_extra_data(header_id: u16, data: &[u8]) -> ZipResult<()> {
    if data.len() > u16::MAX as usize {
        return Err(ZipError::Io(io::Error::new(
            io::ErrorKind::Other,
            "Extra-data field can't exceed u16::MAX bytes",
        )));
    }
    if header_id == 0x0001 {
        return Err(ZipError::Io(io::Error::new(
            io::ErrorKind::Other,
            "No custom ZIP64 extra data allowed",
        )));
    }

    #[cfg(not(feature = "unreserved"))]
    {
        if header_id <= 31
            || EXTRA_FIELD_MAPPING
                .iter()
                .any(|&mapped| mapped == header_id)
        {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "Extra data header ID {header_id:#06x} requires crate feature \"unreserved\"",
                ),
            )));
        }
    }

    Ok(())
}

fn write_local_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    // This entry in the Local header MUST include BOTH original
    // and compressed file size fields.
    writer.write_u16::<LittleEndian>(0x0001)?;
    writer.write_u16::<LittleEndian>(16)?;
    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
    writer.write_u64::<LittleEndian>(file.compressed_size)?;
    // Excluded fields:
    // u32: disk start number
    Ok(())
}

fn update_local_zip64_extra_field<T: Write + Seek>(
    writer: &mut T,
    file: &ZipFileData,
) -> ZipResult<()> {
    let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64;
    writer.seek(SeekFrom::Start(zip64_extra_field + 4))?;
    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
    writer.write_u64::<LittleEndian>(file.compressed_size)?;
    // Excluded fields:
    // u32: disk start number
    Ok(())
}

fn write_central_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<u16> {
    // The order of the fields in the zip64 extended
    // information record is fixed, but the fields MUST
    // only appear if the corresponding Local or Central
    // directory record field is set to 0xFFFF or 0xFFFFFFFF.
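    //
    // Layout produced below (sizes in bytes, per the ZIP application note):
    //   2  header ID (0x0001)
    //   2  size of the remaining data
    //   8  original (uncompressed) size -- only if the 32-bit field is saturated
    //   8  compressed size              -- only if the 32-bit field is saturated
    //   8  local header offset          -- only if the 32-bit field is saturated
    // The 4-byte disk start number is never emitted here.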
    let mut size = 0;
    let uncompressed_size = file.uncompressed_size > spec::ZIP64_BYTES_THR;
    let compressed_size = file.compressed_size > spec::ZIP64_BYTES_THR;
    let header_start = file.header_start > spec::ZIP64_BYTES_THR;
    if uncompressed_size {
        size += 8;
    }
    if compressed_size {
        size += 8;
    }
    if header_start {
        size += 8;
    }
    if size > 0 {
        writer.write_u16::<LittleEndian>(0x0001)?;
        writer.write_u16::<LittleEndian>(size)?;
        size += 4;

        if uncompressed_size {
            writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
        }
        if compressed_size {
            writer.write_u64::<LittleEndian>(file.compressed_size)?;
        }
        if header_start {
            writer.write_u64::<LittleEndian>(file.header_start)?;
        }
        // Excluded fields:
        // u32: disk start number
    }

    Ok(size)
}

fn path_to_string(path: &std::path::Path) -> String {
    let mut path_str = String::new();
    for component in path.components() {
        if let std::path::Component::Normal(os_str) = component {
            if !path_str.is_empty() {
                path_str.push('/');
            }
            path_str.push_str(&os_str.to_string_lossy());
        }
    }
    path_str
}

#[cfg(test)]
mod test {
    use std::borrow::Cow;

    use super::{FileOptions, ZipWriter};
    use crate::compression::CompressionMethod;
    use crate::types::DateTime;
    use crate::ZipArchive;
    use std::io;
    use std::io::{Read, Write};

    #[test]
    fn write_empty_zip() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer.set_comment("ZIP");
        let result = writer.finish().unwrap();
        assert_eq!(result.get_ref().len(), 25);
        assert_eq!(
            *result.get_ref(),
            [80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 90, 73, 80]
        );
    }

    #[test]
    fn unix_permissions_bitmask() {
        // unix_permissions() throws away upper bits.
        let options = FileOptions::default().unix_permissions(0o120777);
        assert_eq!(options.permissions, Some(0o777));
    }

    #[test]
    fn write_zip_dir() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .add_directory(
                "test",
                FileOptions::default().last_modified_time(
                    DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(),
                ),
            )
            .unwrap();
        assert!(writer
            .write(b"writing to a directory is not allowed, and will not write any data")
            .is_err());
        let result = writer.finish().unwrap();
        assert_eq!(result.get_ref().len(), 108);
        assert_eq!(
            *result.get_ref(),
            &[
                80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 5, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0,
                163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 237, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 5, 6, 0, 0, 0, 0, 1,
                0, 1, 0, 51, 0, 0, 0, 35, 0, 0, 0, 0, 0,
            ] as &[u8]
        );
    }

    #[test]
    fn write_symlink_simple() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .add_symlink(
                "name",
                "target",
                FileOptions::default().last_modified_time(
                    DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(),
                ),
            )
            .unwrap();
        assert!(writer
            .write(b"writing to a symlink is not allowed and will not write any data")
            .is_err());
        let result = writer.finish().unwrap();
        assert_eq!(result.get_ref().len(), 112);
        assert_eq!(
            *result.get_ref(),
            &[
                80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0,
                6, 0, 0, 0, 4, 0, 0, 0, 110, 97, 109, 101, 116, 97, 114, 103, 101, 116, 80, 75, 1,
                2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0, 6, 0,
                0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 161, 0, 0, 0, 0, 110, 97, 109, 101,
                80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 50, 0, 0, 0, 40, 0, 0, 0, 0, 0
            ] as &[u8],
        );
    }

    #[test]
    fn write_symlink_wonky_paths() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .add_symlink(
                "directory\\link",
                "/absolute/symlink\\with\\mixed/slashes",
                FileOptions::default().last_modified_time(
                    DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(),
                ),
            )
            .unwrap();
        assert!(writer
            .write(b"writing to a symlink is not allowed and will not write any data")
            .is_err());
        let result = writer.finish().unwrap();
        assert_eq!(result.get_ref().len(), 162);
        assert_eq!(
            *result.get_ref(),
            &[
                80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95, 41, 81, 245, 36, 0, 0, 0,
                36, 0, 0, 0, 14, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108,
                105, 110, 107, 47, 97, 98, 115, 111, 108, 117, 116, 101, 47, 115, 121, 109, 108,
                105, 110, 107, 92, 119, 105, 116, 104, 92, 109, 105, 120, 101, 100, 47, 115, 108,
                97, 115, 104, 101, 115, 80, 75, 1, 2, 46, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77,
                95, 41, 81, 245, 36, 0, 0, 0, 36, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                255, 161, 0, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105,
                110, 107, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 60, 0, 0, 0, 80, 0, 0, 0, 0, 0
            ] as &[u8],
        );
    }

    #[test]
    fn write_mimetype_zip() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        let options = FileOptions {
            compression_method: CompressionMethod::Stored,
            compression_level: None,
            last_modified_time: DateTime::default(),
            permissions: Some(33188),
            large_file: false,
            encrypt_with: None,
            extra_data: Cow::Owned(vec![]),
            central_extra_data: Cow::Owned(vec![]),
            alignment: 1,
        };
        writer.start_file("mimetype", options).unwrap();
        writer
            .write_all(b"application/vnd.oasis.opendocument.text")
            .unwrap();
        let result = writer.finish().unwrap();

        assert_eq!(result.get_ref().len(), 153);
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
        assert_eq!(result.get_ref(), &v);
    }

    #[cfg(test)]
    const RT_TEST_TEXT: &str = "And I can't stop thinking about the moments that I lost to you\
And I can't stop thinking of things I used to do\
And I can't stop making bad decisions\
And I can't stop eating stuff you make me chew\
I put on a smile like you wanna see\
Another day goes by that I long to be like you";
    #[cfg(test)]
    const RT_TEST_FILENAME: &str = "subfolder/sub-subfolder/can't_stop.txt";
    #[cfg(test)]
    const SECOND_FILENAME: &str = "different_name.xyz";
    #[cfg(test)]
    const THIRD_FILENAME: &str = "third_name.xyz";

    #[test]
    fn test_shallow_copy() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        let options = FileOptions {
            compression_method: CompressionMethod::Deflated,
            compression_level: Some(9),
            last_modified_time: DateTime::default(),
            permissions: Some(33188),
            large_file: false,
            encrypt_with: None,
            extra_data: Cow::Owned(vec![]),
            central_extra_data: Cow::Owned(vec![]),
            alignment: 0,
        };
        writer.start_file(RT_TEST_FILENAME, options).unwrap();
        writer.write_all(RT_TEST_TEXT.as_ref()).unwrap();
        writer
            .shallow_copy_file(RT_TEST_FILENAME, SECOND_FILENAME)
            .unwrap();
        writer
            .shallow_copy_file(RT_TEST_FILENAME, SECOND_FILENAME)
            .expect_err("Duplicate filename");
        let zip = writer.finish().unwrap();
        let mut writer = ZipWriter::new_append(zip).unwrap();
        writer
            .shallow_copy_file(SECOND_FILENAME, SECOND_FILENAME)
            .expect_err("Duplicate filename");
        let zip = writer.finish().unwrap();
        let mut reader = ZipArchive::new(zip).unwrap();
        let mut file_names: Vec<&str> = reader.file_names().collect();
        file_names.sort();
        let mut expected_file_names = vec![RT_TEST_FILENAME, SECOND_FILENAME];
        expected_file_names.sort();
        assert_eq!(file_names, expected_file_names);
        let mut first_file_content = String::new();
        reader
            .by_name(RT_TEST_FILENAME)
            .unwrap()
            .read_to_string(&mut first_file_content)
            .unwrap();
        assert_eq!(first_file_content, RT_TEST_TEXT);
        let mut second_file_content = String::new();
        reader
            .by_name(SECOND_FILENAME)
            .unwrap()
            .read_to_string(&mut second_file_content)
            .unwrap();
        assert_eq!(second_file_content, RT_TEST_TEXT);
    }

    #[test]
    fn test_deep_copy() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        let options = FileOptions {
            compression_method: CompressionMethod::Deflated,
            compression_level: Some(9),
            last_modified_time: DateTime::default(),
            permissions: Some(33188),
            large_file: false,
            encrypt_with: None,
            extra_data: Cow::Owned(vec![]),
            central_extra_data: Cow::Owned(vec![]),
            alignment: 0,
        };
        writer.start_file(RT_TEST_FILENAME, options).unwrap();
        writer.write_all(RT_TEST_TEXT.as_ref()).unwrap();
        writer
            .deep_copy_file(RT_TEST_FILENAME, SECOND_FILENAME)
            .unwrap();
        let zip = writer.finish().unwrap();
        let mut writer = ZipWriter::new_append(zip).unwrap();
        writer
            .deep_copy_file(RT_TEST_FILENAME, THIRD_FILENAME)
            .unwrap();
        let zip = writer.finish().unwrap();
        let mut reader = ZipArchive::new(zip).unwrap();
        let mut file_names: Vec<&str> = reader.file_names().collect();
        file_names.sort();
        let mut expected_file_names = vec![RT_TEST_FILENAME, SECOND_FILENAME, THIRD_FILENAME];
        expected_file_names.sort();
        assert_eq!(file_names, expected_file_names);
        let mut first_file_content = String::new();
        reader
            .by_name(RT_TEST_FILENAME)
            .unwrap()
            .read_to_string(&mut first_file_content)
            .unwrap();
        assert_eq!(first_file_content, RT_TEST_TEXT);
        let mut second_file_content = String::new();
        reader
            .by_name(SECOND_FILENAME)
            .unwrap()
            .read_to_string(&mut second_file_content)
            .unwrap();
        assert_eq!(second_file_content, RT_TEST_TEXT);
    }

    #[test]
    fn duplicate_filenames() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .start_file("foo/bar/test", FileOptions::default())
            .unwrap();
        writer
            .write_all("The quick brown 🦊 jumps over the lazy 🐕".as_bytes())
            .unwrap();
        writer
            .start_file("foo/bar/test", FileOptions::default())
            .expect_err("Expected duplicate filename not to be allowed");
    }

    #[test]
    fn test_filename_looks_like_zip64_locator() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .start_file(
                "PK\u{6}\u{7}\0\0\0\u{11}\0\0\0\0\0\0\0\0\0\0\0\0",
                FileOptions::default(),
            )
            .unwrap();
        let zip = writer.finish().unwrap();
        let _ = ZipArchive::new(zip).unwrap();
    }

    #[test]
    fn test_filename_looks_like_zip64_locator_2() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .start_file(
                "PK\u{6}\u{6}\0\0\0\0\0\0\0\0\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
                FileOptions::default(),
            )
            .unwrap();
        let zip = writer.finish().unwrap();
        println!("{:02x?}", zip.get_ref());
        let _ = ZipArchive::new(zip).unwrap();
    }

    #[test]
    fn test_filename_looks_like_zip64_locator_2a() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .start_file(
                "PK\u{6}\u{6}PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
                FileOptions::default(),
            )
            .unwrap();
        let zip = writer.finish().unwrap();
        println!("{:02x?}", zip.get_ref());
        let _ = ZipArchive::new(zip).unwrap();
    }

    #[test]
    fn test_filename_looks_like_zip64_locator_3() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .start_file("\0PK\u{6}\u{6}", FileOptions::default())
            .unwrap();
        writer
            .start_file(
                "\0\u{4}\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{3}",
                FileOptions::default(),
            )
            .unwrap();
        let zip = writer.finish().unwrap();
        println!("{:02x?}", zip.get_ref());
        let _ = ZipArchive::new(zip).unwrap();
    }
    #[test]
    fn test_filename_looks_like_zip64_locator_4() {
        let mut writer = ZipWriter::new(io::Cursor::new(Vec::new()));
        writer
            .start_file("PK\u{6}\u{6}", FileOptions::default())
            .unwrap();
        writer
            .start_file("\0\0\0\0\0\0", FileOptions::default())
            .unwrap();
        writer.start_file("\0", FileOptions::default()).unwrap();
        writer.start_file("", FileOptions::default()).unwrap();
        writer.start_file("\0\0", FileOptions::default()).unwrap();
        writer
            .start_file(
                "\0\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
                FileOptions::default(),
            )
            .unwrap();
        let zip = writer.finish().unwrap();
        println!("{:02x?}", zip.get_ref());
        let _ = ZipArchive::new(zip).unwrap();
    }
}

#[cfg(not(feature = "unreserved"))]
const EXTRA_FIELD_MAPPING: [u16; 49] = [
    0x0001, 0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015,
    0x0016, 0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690,
    0x07c8, 0x2605, 0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41,
    0x4d49, 0x4f4c, 0x5356, 0x5455, 0x554e, 0x5855, 0x6375, 0x6542, 0x7075, 0x756e, 0x7855,
    0xa11e, 0xa220, 0xfd4a, 0x9901, 0x9902,
];
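
// Illustrative sketch (hypothetical test, not part of the original suite): how `path_to_string`
// flattens a `Path` for `start_file_from_path` / `add_directory_from_path`, keeping only
// `Normal` components and joining them with '/'.
#[cfg(test)]
mod path_to_string_example {
    use super::path_to_string;
    use std::path::Path;

    #[test]
    fn keeps_only_normal_components() {
        // Root, `..` and `.` components are dropped rather than resolved.
        assert_eq!(path_to_string(Path::new("/a/b/../c/./d")), "a/b/c/d");
    }
}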