From a191c4b435c683701b5e7b7c891f094da3071fd1 Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Wed, 16 Sep 2020 11:45:21 +0200 Subject: [PATCH 01/10] Support extra field. --- src/read.rs | 17 ++++++++++----- src/types.rs | 3 +++ src/write.rs | 51 ++++++++++++++++++++++++++++++++++++++------- tests/end_to_end.rs | 16 +++++++++++++- 4 files changed, 74 insertions(+), 13 deletions(-) diff --git a/src/read.rs b/src/read.rs index d19b353f..240bccba 100644 --- a/src/read.rs +++ b/src/read.rs @@ -502,6 +502,7 @@ fn central_header_to_zip_file( uncompressed_size: uncompressed_size as u64, file_name, file_name_raw, + extra_field, file_comment, header_start: offset, central_header_start, @@ -509,7 +510,7 @@ fn central_header_to_zip_file( external_attributes: external_file_attributes, }; - match parse_extra_field(&mut result, &*extra_field) { + match parse_extra_field(&mut result) { Ok(..) | Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } @@ -520,10 +521,10 @@ fn central_header_to_zip_file( Ok(result) } -fn parse_extra_field(file: &mut ZipFileData, data: &[u8]) -> ZipResult<()> { - let mut reader = io::Cursor::new(data); +fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> { + let mut reader = io::Cursor::new(&file.extra_field); - while (reader.position() as usize) < data.len() { + while (reader.position() as usize) < file.extra_field.len() { let kind = reader.read_u16::()?; let len = reader.read_u16::()?; let mut len_left = len as i64; @@ -652,6 +653,11 @@ impl<'a> ZipFile<'a> { self.data.crc32 } + /// Get the extra data of the zip header for this file + pub fn extra_data(&self) -> &[u8] { + &self.data.extra_field + } + /// Get the starting offset of the data of the compressed file pub fn data_start(&self) -> u64 { self.data.data_start @@ -761,6 +767,7 @@ pub fn read_zipfile_from_stream<'a, R: io::Read>( uncompressed_size: uncompressed_size as u64, file_name, file_name_raw, + extra_field, file_comment: String::new(), // file comment is only available in the central directory // header_start and data start are not available, but also don't matter, since seeking is // not available. @@ -773,7 +780,7 @@ pub fn read_zipfile_from_stream<'a, R: io::Read>( external_attributes: 0, }; - match parse_extra_field(&mut result, &extra_field) { + match parse_extra_field(&mut result) { Ok(..) | Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } diff --git a/src/types.rs b/src/types.rs index f4684027..1f4c13eb 100644 --- a/src/types.rs +++ b/src/types.rs @@ -230,6 +230,8 @@ pub struct ZipFileData { pub file_name: String, /// Raw file name. To be used when file_name was incorrectly decoded. pub file_name_raw: Vec, + /// Extra field usually used for storage expansion + pub extra_field: Vec, /// File comment pub file_comment: String, /// Specifies where the local header of the file starts @@ -310,6 +312,7 @@ mod test { uncompressed_size: 0, file_name: file_name.clone(), file_name_raw: file_name.into_bytes(), + extra_field: Vec::new(), file_comment: String::new(), header_start: 0, data_start: 0, diff --git a/src/write.rs b/src/write.rs index b8e97b08..d97080bf 100644 --- a/src/write.rs +++ b/src/write.rs @@ -209,9 +209,16 @@ impl ZipWriter { } /// Start a new file for with the requested options. 
- fn start_entry(&mut self, name: S, options: FileOptions) -> ZipResult<()> + fn start_entry( + &mut self, + name: S, + options: FileOptions, + extra_data: F, + ) -> ZipResult<()> where S: Into, + V: Into>, + F: FnOnce(u64) -> V, { self.finish_file()?; @@ -222,6 +229,7 @@ impl ZipWriter { let permissions = options.permissions.unwrap_or(0o100644); let file_name = name.into(); let file_name_raw = file_name.clone().into_bytes(); + let extra_field = extra_data(header_start + 30 + file_name_raw.len() as u64).into(); let mut file = ZipFileData { system: System::Unix, version_made_by: DEFAULT_VERSION, @@ -233,6 +241,7 @@ impl ZipWriter { uncompressed_size: 0, file_name, file_name_raw, + extra_field, file_comment: String::new(), header_start, data_start: 0, @@ -288,7 +297,7 @@ impl ZipWriter { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; - self.start_entry(name, options)?; + self.start_entry(name, options, |_data_start| Vec::new())?; self.writing_to_file = true; Ok(()) } @@ -309,6 +318,30 @@ impl ZipWriter { self.start_file(path_to_string(path), options) } + /// Starts a file with extra data. + /// + /// Extra data is given by closure which provides a preliminary `ZipFile::data_start()` as it + /// would be without any extra data. + pub fn start_file_with_extra_data( + &mut self, + name: S, + mut options: FileOptions, + extra_data: F, + ) -> ZipResult<()> + where + S: Into, + V: Into>, + F: FnOnce(u64) -> V, + { + if options.permissions.is_none() { + options.permissions = Some(0o644); + } + *options.permissions.as_mut().unwrap() |= 0o100000; + self.start_entry(name, options, extra_data)?; + self.writing_to_file = true; + Ok(()) + } + /// Add a directory entry. /// /// You can't write data to the file afterwards. @@ -329,7 +362,7 @@ impl ZipWriter { _ => name_as_string + "/", }; - self.start_entry(name_with_slash, options)?; + self.start_entry(name_with_slash, options, |_data_start| Vec::new())?; self.writing_to_file = false; Ok(()) } @@ -611,10 +644,14 @@ fn write_central_directory_header(writer: &mut T, file: &ZipFileData) Ok(()) } -fn build_extra_field(_file: &ZipFileData) -> ZipResult> { - let writer = Vec::new(); - // Future work - Ok(writer) +fn build_extra_field(file: &ZipFileData) -> ZipResult> { + if file.extra_field.len() > std::u16::MAX as usize { + Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Extra data exceeds extra field", + ))?; + } + Ok(file.extra_field.clone()) } fn path_to_string(path: &std::path::Path) -> String { diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs index 6268920a..ff25af16 100644 --- a/tests/end_to_end.rs +++ b/tests/end_to_end.rs @@ -28,6 +28,9 @@ fn write_to_zip_file(file: &mut Cursor>) -> zip::result::ZipResult<()> { zip.start_file("test/☃.txt", options)?; zip.write_all(b"Hello, World!\n")?; + zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", options, |_| LOREM_IPSUM)?; + zip.write_all(b"Hello, World! 
Again.\n")?; + zip.start_file("test/lorem_ipsum.txt", Default::default())?; zip.write_all(LOREM_IPSUM)?; @@ -38,11 +41,22 @@ fn write_to_zip_file(file: &mut Cursor>) -> zip::result::ZipResult<()> { fn read_zip_file(zip_file: &mut Cursor>) -> zip::result::ZipResult { let mut archive = zip::ZipArchive::new(zip_file).unwrap(); - let expected_file_names = ["test/", "test/☃.txt", "test/lorem_ipsum.txt"]; + let expected_file_names = [ + "test/", + "test/☃.txt", + "test_with_extra_data/🐢.txt", + "test/lorem_ipsum.txt", + ]; let expected_file_names = HashSet::from_iter(expected_file_names.iter().map(|&v| v)); let file_names = archive.file_names().collect::>(); assert_eq!(file_names, expected_file_names); + { + let file_with_extra_data = archive.by_name("test_with_extra_data/🐢.txt")?; + let expected_extra_data = LOREM_IPSUM; + assert_eq!(file_with_extra_data.extra_data(), expected_extra_data); + } + let mut file = archive.by_name("test/lorem_ipsum.txt")?; let mut contents = String::new(); From 365f139206608023f313f9c8a6f0280ba6b2d717 Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Fri, 18 Sep 2020 16:05:01 +0200 Subject: [PATCH 02/10] Use `io::Write` for extra field. --- src/write.rs | 190 ++++++++++++++++++++++++++++++++++---------- tests/end_to_end.rs | 16 +++- 2 files changed, 159 insertions(+), 47 deletions(-) diff --git a/src/write.rs b/src/write.rs index d97080bf..5d2fb824 100644 --- a/src/write.rs +++ b/src/write.rs @@ -67,6 +67,8 @@ pub struct ZipWriter { files: Vec, stats: ZipWriterStats, writing_to_file: bool, + writing_to_extra_field: bool, + writing_to_central_extra_field_only: bool, comment: String, } @@ -155,11 +157,15 @@ impl Write for ZipWriter { } match self.inner.ref_mut() { Some(ref mut w) => { - let write_result = w.write(buf); - if let Ok(count) = write_result { - self.stats.update(&buf[0..count]); + if self.writing_to_extra_field { + self.files.last_mut().unwrap().extra_field.write(buf) + } else { + let write_result = w.write(buf); + if let Ok(count) = write_result { + self.stats.update(&buf[0..count]); + } + write_result } - write_result } None => Err(io::Error::new( io::ErrorKind::BrokenPipe, @@ -196,6 +202,8 @@ impl ZipWriter { files: Vec::new(), stats: Default::default(), writing_to_file: false, + writing_to_extra_field: false, + writing_to_central_extra_field_only: false, comment: String::new(), } } @@ -209,16 +217,9 @@ impl ZipWriter { } /// Start a new file for with the requested options. - fn start_entry( - &mut self, - name: S, - options: FileOptions, - extra_data: F, - ) -> ZipResult<()> + fn start_entry(&mut self, name: S, options: FileOptions) -> ZipResult<()> where S: Into, - V: Into>, - F: FnOnce(u64) -> V, { self.finish_file()?; @@ -229,7 +230,6 @@ impl ZipWriter { let permissions = options.permissions.unwrap_or(0o100644); let file_name = name.into(); let file_name_raw = file_name.clone().into_bytes(); - let extra_field = extra_data(header_start + 30 + file_name_raw.len() as u64).into(); let mut file = ZipFileData { system: System::Unix, version_made_by: DEFAULT_VERSION, @@ -241,7 +241,7 @@ impl ZipWriter { uncompressed_size: 0, file_name, file_name_raw, - extra_field, + extra_field: Vec::new(), file_comment: String::new(), header_start, data_start: 0, @@ -266,6 +266,10 @@ impl ZipWriter { } fn finish_file(&mut self) -> ZipResult<()> { + if self.writing_to_extra_field { + // Implicitly calling `end_extra_data()` for empty files. 
+ self.end_extra_data()?; + } self.inner.switch_to(CompressionMethod::Stored)?; let writer = self.inner.get_plain(); @@ -297,14 +301,14 @@ impl ZipWriter { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; - self.start_entry(name, options, |_data_start| Vec::new())?; + self.start_entry(name, options)?; self.writing_to_file = true; Ok(()) } /// Starts a file, taking a Path as argument. /// - /// This function ensures that the '/' path seperator is used. It also ignores all non 'Normal' + /// This function ensures that the '/' path separator is used. It also ignores all non 'Normal' /// Components, such as a starting '/' or '..' and '.'. #[deprecated( since = "0.5.7", @@ -318,28 +322,140 @@ impl ZipWriter { self.start_file(path_to_string(path), options) } - /// Starts a file with extra data. + /// Create a file in the archive and start writing its extra data first. /// - /// Extra data is given by closure which provides a preliminary `ZipFile::data_start()` as it - /// would be without any extra data. - pub fn start_file_with_extra_data( + /// Finish writing extra data and start writing file data with `end_extra_data()`. Optionally, + /// distinguish local from central extra data with `end_local_start_central_extra_data()`. + /// + /// Returns the preliminary starting offset of the file data without any extra data allowing to + /// align the file data by calculating a pad length to be prepended as part of the extra data. + /// + /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`] + /// + /// ``` + /// use byteorder::{LittleEndian, WriteBytesExt}; + /// use zip::{ZipArchive, ZipWriter, write::FileOptions, result::ZipResult}; + /// use std::io::{Write, Cursor}; + /// + /// # fn main() -> ZipResult<()> { + /// let mut archive = Cursor::new(Vec::new()); + /// + /// { + /// let mut zip = ZipWriter::new(&mut archive); + /// let options = FileOptions::default(); + /// + /// zip.start_file_with_extra_data("identical_extra_data.txt", options)?; + /// let extra_data = b"local and central extra data"; + /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(extra_data.len() as u16)?; + /// zip.write_all(extra_data)?; + /// zip.end_extra_data()?; + /// zip.write_all(b"file data")?; + /// + /// let data_start = zip.start_file_with_extra_data("different_extra_data.txt", options)?; + /// let extra_data = b"local extra data"; + /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(extra_data.len() as u16)?; + /// zip.write_all(extra_data)?; + /// let data_start = data_start as usize + 4 + extra_data.len() + 4; + /// let align = 64; + /// let pad_length = (align - data_start % align) % align; + /// assert_eq!(pad_length, 17); + /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(pad_length as u16)?; + /// zip.write_all(&vec![0; pad_length])?; + /// let data_start = zip.end_local_start_central_extra_data()?; + /// assert_eq!(data_start as usize % align, 0); + /// let extra_data = b"central extra data"; + /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(extra_data.len() as u16)?; + /// zip.write_all(extra_data)?; + /// zip.end_extra_data()?; + /// zip.write_all(b"file data")?; + /// + /// zip.finish()?; + /// } + /// + /// let mut zip = ZipArchive::new(archive)?; + /// assert_eq!(&zip.by_index(0)?.extra_data()[4..], b"local and central extra data"); + /// assert_eq!(&zip.by_index(1)?.extra_data()[4..], b"central extra data"); + /// # Ok(()) + /// # } + /// ``` + pub fn start_file_with_extra_data( &mut self, name: S, mut options: 
FileOptions, - extra_data: F, - ) -> ZipResult<()> + ) -> ZipResult where S: Into, - V: Into>, - F: FnOnce(u64) -> V, { if options.permissions.is_none() { options.permissions = Some(0o644); } *options.permissions.as_mut().unwrap() |= 0o100000; - self.start_entry(name, options, extra_data)?; + self.start_entry(name, options)?; self.writing_to_file = true; - Ok(()) + self.writing_to_extra_field = true; + Ok(self.files.last().unwrap().data_start) + } + + /// End local and start central extra data. Requires `start_file_with_extra_data()`. + /// + /// Returns the final starting offset of the file data. + pub fn end_local_start_central_extra_data(&mut self) -> ZipResult { + let data_start = self.end_extra_data()?; + self.files.last_mut().unwrap().extra_field.clear(); + self.writing_to_extra_field = true; + self.writing_to_central_extra_field_only = true; + Ok(data_start) + } + + /// End extra data and start file data. Requires `start_file_with_extra_data()`. + /// + /// Returns the final starting offset of the file data. + pub fn end_extra_data(&mut self) -> ZipResult { + // Require `start_file_with_extra_data()`. Ensures `file` is some. + if !self.writing_to_extra_field { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::Other, + "Not writing to extra field", + ))); + } + let file = self.files.last_mut().unwrap(); + + // Ensure extra data fits into extra field. + if file.extra_field.len() > 0xFFFF { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::InvalidData, + "Extra data exceeds extra field", + ))); + } + + if !self.writing_to_central_extra_field_only { + self.inner.switch_to(CompressionMethod::Stored)?; + let writer = self.inner.get_plain(); + + // Append extra data to local file header and keep it for central file header. + writer.seek(io::SeekFrom::Start(file.data_start))?; + writer.write_all(&file.extra_field)?; + + // Update final `data_start` as done in `start_entry()`. + let header_end = writer.seek(io::SeekFrom::Current(0))?; + self.stats.start = header_end; + file.data_start = header_end; + + // Update extra field length in local file header. + writer.seek(io::SeekFrom::Start(file.header_start + 28))?; + writer.write_u16::(file.extra_field.len() as u16)?; + writer.seek(io::SeekFrom::Start(header_end))?; + + self.inner.switch_to(file.compression_method)?; + } + + self.writing_to_extra_field = false; + self.writing_to_central_extra_field_only = false; + Ok(file.data_start) } /// Add a directory entry. 
@@ -362,7 +478,7 @@ impl ZipWriter { _ => name_as_string + "/", }; - self.start_entry(name_with_slash, options, |_data_start| Vec::new())?; + self.start_entry(name_with_slash, options)?; self.writing_to_file = false; Ok(()) } @@ -570,12 +686,9 @@ fn write_local_file_header(writer: &mut T, file: &ZipFileData) -> ZipR // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length - let extra_field = build_extra_field(file)?; - writer.write_u16::(extra_field.len() as u16)?; + writer.write_u16::(file.extra_field.len() as u16)?; // file name writer.write_all(file.file_name.as_bytes())?; - // extra field - writer.write_all(&extra_field)?; Ok(()) } @@ -622,8 +735,7 @@ fn write_central_directory_header(writer: &mut T, file: &ZipFileData) // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length - let extra_field = build_extra_field(file)?; - writer.write_u16::(extra_field.len() as u16)?; + writer.write_u16::(file.extra_field.len() as u16)?; // file comment length writer.write_u16::(0)?; // disk number start @@ -637,23 +749,13 @@ fn write_central_directory_header(writer: &mut T, file: &ZipFileData) // file name writer.write_all(file.file_name.as_bytes())?; // extra field - writer.write_all(&extra_field)?; + writer.write_all(&file.extra_field)?; // file comment // Ok(()) } -fn build_extra_field(file: &ZipFileData) -> ZipResult> { - if file.extra_field.len() > std::u16::MAX as usize { - Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Extra data exceeds extra field", - ))?; - } - Ok(file.extra_field.clone()) -} - fn path_to_string(path: &std::path::Path) -> String { let mut path_str = String::new(); for component in path.components() { diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs index ff25af16..0821433f 100644 --- a/tests/end_to_end.rs +++ b/tests/end_to_end.rs @@ -1,3 +1,4 @@ +use byteorder::{LittleEndian, WriteBytesExt}; use std::collections::HashSet; use std::io::prelude::*; use std::io::Cursor; @@ -28,7 +29,11 @@ fn write_to_zip_file(file: &mut Cursor>) -> zip::result::ZipResult<()> { zip.start_file("test/☃.txt", options)?; zip.write_all(b"Hello, World!\n")?; - zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", options, |_| LOREM_IPSUM)?; + zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", options)?; + zip.write_u16::(0)?; + zip.write_u16::(EXTRA_DATA.len() as u16)?; + zip.write_all(EXTRA_DATA)?; + zip.end_extra_data()?; zip.write_all(b"Hello, World! Again.\n")?; zip.start_file("test/lorem_ipsum.txt", Default::default())?; @@ -53,8 +58,11 @@ fn read_zip_file(zip_file: &mut Cursor>) -> zip::result::ZipResult(0)?; + extra_data.write_u16::(EXTRA_DATA.len() as u16)?; + extra_data.write_all(EXTRA_DATA)?; + assert_eq!(file_with_extra_data.extra_data(), extra_data.as_slice()); } let mut file = archive.by_name("test/lorem_ipsum.txt")?; @@ -70,3 +78,5 @@ dictum quis auctor quis, suscipit id lorem. Aliquam vestibulum dolor nec enim ve vitae tristique consectetur, neque lectus pulvinar dui, sed feugiat purus diam id lectus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Maecenas feugiat velit in ex ultrices scelerisque id id neque. "; + +const EXTRA_DATA: &'static [u8] = b"Extra Data"; From d1d4326bff81bc3b9dcec739c12f8ee177221e3c Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Fri, 25 Sep 2020 12:08:53 +0200 Subject: [PATCH 03/10] Support aligned files. 
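For orientation, a minimal usage sketch of the `start_file_aligned` helper added by the patch below, assuming a build of the crate with this series applied; the in-memory buffer, entry name, and 4096-byte alignment are illustrative only:

```rust
use std::io::{Cursor, Write};
use zip::{write::FileOptions, CompressionMethod, ZipWriter};

fn main() -> zip::result::ZipResult<()> {
    let mut buf = Cursor::new(Vec::new());
    let mut zip = ZipWriter::new(&mut buf);
    let options = FileOptions::default().compression_method(CompressionMethod::Stored);
    // Pad the local header so the stored payload begins on a 4 KiB boundary,
    // e.g. so the entry can later be memory-mapped straight out of the archive.
    let padding = zip.start_file_aligned("assets/page_aligned.bin", options, 4096)?;
    println!("inserted {} bytes of extra-field padding", padding);
    zip.write_all(&[0u8; 1024])?;
    zip.finish()?;
    Ok(())
}
```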
---
 src/write.rs | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/src/write.rs b/src/write.rs
index 5d2fb824..2075c53b 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -322,6 +322,35 @@ impl<W: Write + io::Seek> ZipWriter<W> {
         self.start_file(path_to_string(path), options)
     }
 
+    /// Create an aligned file in the archive and start writing its contents.
+    ///
+    /// Returns the number of padding bytes required to align the file.
+    ///
+    /// The data should be written using the [`io::Write`] implementation on this [`ZipWriter`]
+    pub fn start_file_aligned<S>(
+        &mut self,
+        name: S,
+        options: FileOptions,
+        align: u16,
+    ) -> Result<u64, ZipError>
+    where
+        S: Into<String>,
+    {
+        let data_start = self.start_file_with_extra_data(name, options)?;
+        let align = align as u64;
+        if align > 1 && data_start % align != 0 {
+            let pad_length = (align - (data_start + 4) % align) % align;
+            let pad = vec![0; pad_length as usize];
+            self.write_all(b"za").map_err(ZipError::from)?; // 0x617a
+            self.write_u16::<LittleEndian>(pad.len() as u16)
+                .map_err(ZipError::from)?;
+            self.write_all(&pad).map_err(ZipError::from)?;
+            assert_eq!(self.end_local_start_central_extra_data()? % align, 0);
+        }
+        let extra_data_end = self.end_extra_data()?;
+        Ok(extra_data_end - data_start)
+    }
+
     /// Create a file in the archive and start writing its extra data first.
     ///
     /// Finish writing extra data and start writing file data with `end_extra_data()`. Optionally,

From ba8307abc7fd03fc0a27d674c7b6a2a6d84e5392 Mon Sep 17 00:00:00 2001
From: Rouven Spreckels
Date: Thu, 1 Oct 2020 09:41:32 +0200
Subject: [PATCH 04/10] Improve extra field support.

- Switch to the compression method only once, as was the case before extra
  data support, allowing future encoders to do early writes when created.
- Reduce seeks by calculating offsets.
- Use `Stored` instead of the feature-dependent default in the doc example.
  There is a 2-byte pad length difference with deflate disabled.
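The seek reduction works because every offset in play is derivable from the fixed local-header layout. A small sketch of that arithmetic (the helper name is an illustrative assumption, not crate API):

```rust
/// A local file header is 30 bytes of fixed fields, followed by the file name
/// and then the extra field, so the final payload offset can be computed once
/// the extra field length is known instead of seeking to discover it.
fn data_start(header_start: u64, file_name: &str, extra_field_len: u64) -> u64 {
    const LOCAL_HEADER_FIXED_LEN: u64 = 30;
    header_start + LOCAL_HEADER_FIXED_LEN + file_name.len() as u64 + extra_field_len
}
```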
--- src/write.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/write.rs b/src/write.rs index 2075c53b..72bc63ea 100644 --- a/src/write.rs +++ b/src/write.rs @@ -260,8 +260,6 @@ impl ZipWriter { self.files.push(file); } - self.inner.switch_to(options.compression_method)?; - Ok(()) } @@ -302,6 +300,7 @@ impl ZipWriter { } *options.permissions.as_mut().unwrap() |= 0o100000; self.start_entry(name, options)?; + self.inner.switch_to(options.compression_method)?; self.writing_to_file = true; Ok(()) } @@ -363,7 +362,8 @@ impl ZipWriter { /// /// ``` /// use byteorder::{LittleEndian, WriteBytesExt}; - /// use zip::{ZipArchive, ZipWriter, write::FileOptions, result::ZipResult}; + /// use zip::{ZipArchive, ZipWriter, result::ZipResult}; + /// use zip::{write::FileOptions, CompressionMethod}; /// use std::io::{Write, Cursor}; /// /// # fn main() -> ZipResult<()> { @@ -371,7 +371,8 @@ impl ZipWriter { /// /// { /// let mut zip = ZipWriter::new(&mut archive); - /// let options = FileOptions::default(); + /// let options = FileOptions::default() + /// .compression_method(CompressionMethod::Stored); /// /// zip.start_file_with_extra_data("identical_extra_data.txt", options)?; /// let extra_data = b"local and central extra data"; @@ -389,7 +390,7 @@ impl ZipWriter { /// let data_start = data_start as usize + 4 + extra_data.len() + 4; /// let align = 64; /// let pad_length = (align - data_start % align) % align; - /// assert_eq!(pad_length, 17); + /// assert_eq!(pad_length, 19); /// zip.write_u16::(0x0000)?; /// zip.write_u16::(pad_length as u16)?; /// zip.write_all(&vec![0; pad_length])?; @@ -462,15 +463,13 @@ impl ZipWriter { } if !self.writing_to_central_extra_field_only { - self.inner.switch_to(CompressionMethod::Stored)?; let writer = self.inner.get_plain(); // Append extra data to local file header and keep it for central file header. - writer.seek(io::SeekFrom::Start(file.data_start))?; writer.write_all(&file.extra_field)?; - // Update final `data_start` as done in `start_entry()`. - let header_end = writer.seek(io::SeekFrom::Current(0))?; + // Update final `data_start`. + let header_end = file.data_start + file.extra_field.len() as u64; self.stats.start = header_end; file.data_start = header_end; From 9397773a3240f24803a26afb8ef6778dec38291d Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Sat, 3 Oct 2020 11:05:23 +0200 Subject: [PATCH 05/10] Add ZIP64 write support. 
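Background for the diff that follows: ZIP64 keeps the classic 32-bit header fields but saturates them at 0xFFFFFFFF, moving the true values into the 0x0001 extended-information extra field. A minimal sketch of that spill rule (helper names are illustrative, not the crate's API):

```rust
/// Value written into a legacy 32-bit size/offset field; 0xFFFFFFFF signals
/// that the real 64-bit value follows in the ZIP64 (0x0001) extra field.
fn saturated_u32(value: u64) -> u32 {
    if value > 0xFFFF_FFFF {
        0xFFFF_FFFF
    } else {
        value as u32
    }
}

/// The ZIP64 extra field is only required once any of these fields overflows.
fn needs_zip64(uncompressed: u64, compressed: u64, header_start: u64) -> bool {
    uncompressed > 0xFFFF_FFFF || compressed > 0xFFFF_FFFF || header_start > 0xFFFF_FFFF
}
```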
--- Cargo.toml | 1 + src/read.rs | 4 + src/spec.rs | 22 ++++ src/types.rs | 15 ++- src/write.rs | 278 +++++++++++++++++++++++++++++++++++++++----- tests/end_to_end.rs | 4 +- 6 files changed, 293 insertions(+), 31 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index abc33f7a..3712b7db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ walkdir = "2" deflate = ["flate2/rust_backend"] deflate-miniz = ["flate2/default"] deflate-zlib = ["flate2/zlib"] +unreserved = [] default = ["bzip2", "deflate", "time"] [[bench]] diff --git a/src/read.rs b/src/read.rs index 240bccba..ed9c5451 100644 --- a/src/read.rs +++ b/src/read.rs @@ -508,6 +508,7 @@ fn central_header_to_zip_file( central_header_start, data_start: 0, external_attributes: external_file_attributes, + large_file: false, }; match parse_extra_field(&mut result) { @@ -530,6 +531,8 @@ fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> { let mut len_left = len as i64; // Zip64 extended information extra field if kind == 0x0001 { + file.large_file = true; + if file.uncompressed_size == 0xFFFFFFFF { file.uncompressed_size = reader.read_u64::()?; len_left -= 8; @@ -778,6 +781,7 @@ pub fn read_zipfile_from_stream<'a, R: io::Read>( // We set this to zero, which should be valid as the docs state 'If input came // from standard input, this field is set to zero.' external_attributes: 0, + large_file: false, }; match parse_extra_field(&mut result) { diff --git a/src/spec.rs b/src/spec.rs index 8fa8c5c1..2e25c400 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -120,6 +120,14 @@ impl Zip64CentralDirectoryEndLocator { number_of_disks, }) } + + pub fn write(&self, writer: &mut T) -> ZipResult<()> { + writer.write_u32::(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?; + writer.write_u32::(self.disk_with_central_directory)?; + writer.write_u64::(self.end_of_central_directory_offset)?; + writer.write_u32::(self.number_of_disks)?; + Ok(()) + } } pub struct Zip64CentralDirectoryEnd { @@ -182,4 +190,18 @@ impl Zip64CentralDirectoryEnd { "Could not find ZIP64 central directory end", )) } + + pub fn write(&self, writer: &mut T) -> ZipResult<()> { + writer.write_u32::(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?; + writer.write_u64::(44)?; // record size + writer.write_u16::(self.version_made_by)?; + writer.write_u16::(self.version_needed_to_extract)?; + writer.write_u32::(self.disk_number)?; + writer.write_u32::(self.disk_with_central_directory)?; + writer.write_u64::(self.number_of_files_on_this_disk)?; + writer.write_u64::(self.number_of_files)?; + writer.write_u64::(self.central_directory_size)?; + writer.write_u64::(self.central_directory_offset)?; + Ok(()) + } } diff --git a/src/types.rs b/src/types.rs index 1f4c13eb..c753fb43 100644 --- a/src/types.rs +++ b/src/types.rs @@ -244,6 +244,8 @@ pub struct ZipFileData { pub data_start: u64, /// External file attributes pub external_attributes: u32, + /// Reserve local ZIP64 extra field + pub large_file: bool, } impl ZipFileData { @@ -277,10 +279,18 @@ impl ZipFileData { }) } + pub fn zip64_extension(&self) -> bool { + self.uncompressed_size > 0xFFFFFFFF + || self.compressed_size > 0xFFFFFFFF + || self.header_start > 0xFFFFFFFF + } + pub fn version_needed(&self) -> u16 { - match self.compression_method { + // higher versions matched first + match (self.zip64_extension(), self.compression_method) { #[cfg(feature = "bzip2")] - crate::compression::CompressionMethod::Bzip2 => 46, + (_, crate::compression::CompressionMethod::Bzip2) => 46, + (true, _) => 45, _ => 20, } } @@ -318,6 +328,7 @@ mod test { 
data_start: 0, central_header_start: 0, external_attributes: 0, + large_file: false, }; assert_eq!( data.file_name_sanitized(), diff --git a/src/write.rs b/src/write.rs index 72bc63ea..9db89f04 100644 --- a/src/write.rs +++ b/src/write.rs @@ -4,7 +4,7 @@ use crate::compression::CompressionMethod; use crate::result::{ZipError, ZipResult}; use crate::spec; use crate::types::{DateTime, System, ZipFileData, DEFAULT_VERSION}; -use byteorder::{LittleEndian, WriteBytesExt}; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crc32fast::Hasher; use std::default::Default; use std::io; @@ -85,6 +85,7 @@ pub struct FileOptions { compression_method: CompressionMethod, last_modified_time: DateTime, permissions: Option, + large_file: bool, } impl FileOptions { @@ -108,6 +109,7 @@ impl FileOptions { #[cfg(not(feature = "time"))] last_modified_time: DateTime::default(), permissions: None, + large_file: false, } } @@ -115,7 +117,6 @@ impl FileOptions { /// /// The default is `CompressionMethod::Deflated`. If the deflate compression feature is /// disabled, `CompressionMethod::Stored` becomes the default. - /// otherwise. pub fn compression_method(mut self, method: CompressionMethod) -> FileOptions { self.compression_method = method; self @@ -139,6 +140,15 @@ impl FileOptions { self.permissions = Some(mode & 0o777); self } + + /// Set whether the new file's compressed and uncompressed size is less than 4 GiB. + /// + /// If set to `false` and the file exceeds the limit, an I/O error is thrown. If set to `true` + /// and the file does not exceed the limit, 20 B are wasted. The default is `false`. + pub fn large_file(mut self, large: bool) -> FileOptions { + self.large_file = large; + self + } } impl Default for FileOptions { @@ -163,6 +173,14 @@ impl Write for ZipWriter { let write_result = w.write(buf); if let Ok(count) = write_result { self.stats.update(&buf[0..count]); + if self.stats.bytes_written > 0xFFFFFFFF + && !self.files.last_mut().unwrap().large_file + { + return Err(io::Error::new( + io::ErrorKind::Other, + "Large file option has not been set", + )); + } } write_result } @@ -247,6 +265,7 @@ impl ZipWriter { data_start: 0, central_header_start: 0, external_attributes: permissions << 16, + large_file: options.large_file, }; write_local_file_header(writer, &file)?; @@ -376,7 +395,7 @@ impl ZipWriter { /// /// zip.start_file_with_extra_data("identical_extra_data.txt", options)?; /// let extra_data = b"local and central extra data"; - /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// zip.end_extra_data()?; @@ -384,20 +403,20 @@ impl ZipWriter { /// /// let data_start = zip.start_file_with_extra_data("different_extra_data.txt", options)?; /// let extra_data = b"local extra data"; - /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as u16)?; /// zip.write_all(extra_data)?; /// let data_start = data_start as usize + 4 + extra_data.len() + 4; /// let align = 64; /// let pad_length = (align - data_start % align) % align; /// assert_eq!(pad_length, 19); - /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(0xdead)?; /// zip.write_u16::(pad_length as u16)?; /// zip.write_all(&vec![0; pad_length])?; /// let data_start = zip.end_local_start_central_extra_data()?; /// assert_eq!(data_start as usize % align, 0); /// let extra_data = b"central extra data"; - /// zip.write_u16::(0x0000)?; + /// zip.write_u16::(0xbeef)?; /// zip.write_u16::(extra_data.len() as 
u16)?; /// zip.write_all(extra_data)?; /// zip.end_extra_data()?; @@ -454,13 +473,7 @@ impl ZipWriter { } let file = self.files.last_mut().unwrap(); - // Ensure extra data fits into extra field. - if file.extra_field.len() > 0xFFFF { - return Err(ZipError::Io(io::Error::new( - io::ErrorKind::InvalidData, - "Extra data exceeds extra field", - ))); - } + validate_extra_data(&file)?; if !self.writing_to_central_extra_field_only { let writer = self.inner.get_plain(); @@ -474,8 +487,10 @@ impl ZipWriter { file.data_start = header_end; // Update extra field length in local file header. + let extra_field_length = + if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16; writer.seek(io::SeekFrom::Start(file.header_start + 28))?; - writer.write_u16::(file.extra_field.len() as u16)?; + writer.write_u16::(extra_field_length)?; writer.seek(io::SeekFrom::Start(header_end))?; self.inner.switch_to(file.compression_method)?; @@ -549,13 +564,50 @@ impl ZipWriter { } let central_size = writer.seek(io::SeekFrom::Current(0))? - central_start; + if self.files.len() > 0xFFFF || central_size > 0xFFFFFFFF || central_start > 0xFFFFFFFF + { + let zip64_footer = spec::Zip64CentralDirectoryEnd { + version_made_by: DEFAULT_VERSION as u16, + version_needed_to_extract: DEFAULT_VERSION as u16, + disk_number: 0, + disk_with_central_directory: 0, + number_of_files_on_this_disk: self.files.len() as u64, + number_of_files: self.files.len() as u64, + central_directory_size: central_size, + central_directory_offset: central_start, + }; + + zip64_footer.write(writer)?; + + let zip64_footer = spec::Zip64CentralDirectoryEndLocator { + disk_with_central_directory: 0, + end_of_central_directory_offset: central_start + central_size, + number_of_disks: 1, + }; + + zip64_footer.write(writer)?; + } + + let number_of_files = if self.files.len() > 0xFFFF { + 0xFFFF + } else { + self.files.len() as u16 + }; let footer = spec::CentralDirectoryEnd { disk_number: 0, disk_with_central_directory: 0, - number_of_files_on_this_disk: self.files.len() as u16, - number_of_files: self.files.len() as u16, - central_directory_size: central_size as u32, - central_directory_offset: central_start as u32, + number_of_files_on_this_disk: number_of_files, + number_of_files, + central_directory_size: if central_size > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + central_size as u32 + }, + central_directory_offset: if central_start > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + central_start as u32 + }, zip_file_comment: self.comment.as_bytes().to_vec(), }; @@ -708,15 +760,28 @@ fn write_local_file_header(writer: &mut T, file: &ZipFileData) -> ZipR // crc-32 writer.write_u32::(file.crc32)?; // compressed size - writer.write_u32::(file.compressed_size as u32)?; + writer.write_u32::(if file.compressed_size > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + file.compressed_size as u32 + })?; // uncompressed size - writer.write_u32::(file.uncompressed_size as u32)?; + writer.write_u32::(if file.uncompressed_size > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + file.uncompressed_size as u32 + })?; // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length - writer.write_u16::(file.extra_field.len() as u16)?; + let extra_field_length = if file.large_file { 20 } else { 0 } + file.extra_field.len() as u16; + writer.write_u16::(extra_field_length)?; // file name writer.write_all(file.file_name.as_bytes())?; + // zip64 extra field + if file.large_file { + write_local_zip64_extra_field(writer, &file)?; + } Ok(()) } @@ -728,12 +793,37 @@ fn 
update_local_file_header( const CRC32_OFFSET: u64 = 14; writer.seek(io::SeekFrom::Start(file.header_start + CRC32_OFFSET))?; writer.write_u32::(file.crc32)?; - writer.write_u32::(file.compressed_size as u32)?; - writer.write_u32::(file.uncompressed_size as u32)?; + writer.write_u32::(if file.compressed_size > 0xFFFFFFFF { + if file.large_file { + 0xFFFFFFFF + } else { + // compressed size can be slightly larger than uncompressed size + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::Other, + "Large file option has not been set", + ))); + } + } else { + file.compressed_size as u32 + })?; + writer.write_u32::(if file.uncompressed_size > 0xFFFFFFFF { + // uncompressed size is checked on write to catch it as soon as possible + 0xFFFFFFFF + } else { + file.uncompressed_size as u32 + })?; + if file.large_file { + update_local_zip64_extra_field(writer, file)?; + } Ok(()) } fn write_central_directory_header(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { + // buffer zip64 extra field to determine its variable length + let mut zip64_extra_field = [0; 28]; + let zip64_extra_field_length = + write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?; + // central file header signature writer.write_u32::(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?; // version made by @@ -757,13 +847,21 @@ fn write_central_directory_header(writer: &mut T, file: &ZipFileData) // crc-32 writer.write_u32::(file.crc32)?; // compressed size - writer.write_u32::(file.compressed_size as u32)?; + writer.write_u32::(if file.compressed_size > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + file.compressed_size as u32 + })?; // uncompressed size - writer.write_u32::(file.uncompressed_size as u32)?; + writer.write_u32::(if file.uncompressed_size > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + file.uncompressed_size as u32 + })?; // file name length writer.write_u16::(file.file_name.as_bytes().len() as u16)?; // extra field length - writer.write_u16::(file.extra_field.len() as u16)?; + writer.write_u16::(zip64_extra_field_length + file.extra_field.len() as u16)?; // file comment length writer.write_u16::(0)?; // disk number start @@ -773,9 +871,15 @@ fn write_central_directory_header(writer: &mut T, file: &ZipFileData) // external file attributes writer.write_u32::(file.external_attributes)?; // relative offset of local header - writer.write_u32::(file.header_start as u32)?; + writer.write_u32::(if file.header_start > 0xFFFFFFFF { + 0xFFFFFFFF + } else { + file.header_start as u32 + })?; // file name writer.write_all(file.file_name.as_bytes())?; + // zip64 extra field + writer.write_all(&zip64_extra_field[..zip64_extra_field_length as usize])?; // extra field writer.write_all(&file.extra_field)?; // file comment @@ -784,6 +888,125 @@ fn write_central_directory_header(writer: &mut T, file: &ZipFileData) Ok(()) } +fn validate_extra_data(file: &ZipFileData) -> ZipResult<()> { + let mut data = file.extra_field.as_slice(); + + if data.len() > 0xFFFF { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::InvalidData, + "Extra data exceeds extra field", + ))); + } + + while data.len() > 0 { + let left = data.len(); + if left < 4 { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::Other, + "Incomplete extra data header", + ))); + } + let kind = data.read_u16::()?; + let size = data.read_u16::()? 
as usize; + let left = left - 4; + + if kind == 0x0001 { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::Other, + "No custom ZIP64 extra data allowed", + ))); + } + + #[cfg(not(feature = "unreserved"))] + { + if kind <= 31 + || [0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690] + .iter() + .any(|&reserved| reserved == kind) + { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::Other, + "Reserved extra data header ID", + ))); + } + } + + if size > left { + return Err(ZipError::Io(io::Error::new( + io::ErrorKind::Other, + "Extra data size exceeds extra field", + ))); + } + + data = &data[size..]; + } + + Ok(()) +} + +fn write_local_zip64_extra_field(writer: &mut T, file: &ZipFileData) -> ZipResult<()> { + // This entry in the Local header MUST include BOTH original + // and compressed file size fields. + writer.write_u16::(0x0001)?; + writer.write_u16::(16)?; + writer.write_u64::(file.uncompressed_size)?; + writer.write_u64::(file.compressed_size)?; + // Excluded fields: + // u32: disk start number + Ok(()) +} + +fn update_local_zip64_extra_field( + writer: &mut T, + file: &ZipFileData, +) -> ZipResult<()> { + let zip64_extra_field = file.header_start + 30 + file.file_name_raw.len() as u64; + writer.seek(io::SeekFrom::Start(zip64_extra_field + 4))?; + writer.write_u64::(file.uncompressed_size)?; + writer.write_u64::(file.compressed_size)?; + // Excluded fields: + // u32: disk start number + Ok(()) +} + +fn write_central_zip64_extra_field(writer: &mut T, file: &ZipFileData) -> ZipResult { + // The order of the fields in the zip64 extended + // information record is fixed, but the fields MUST + // only appear if the corresponding Local or Central + // directory record field is set to 0xFFFF or 0xFFFFFFFF. + let mut size = 0; + let uncompressed_size = file.uncompressed_size > 0xFFFFFFFF; + let compressed_size = file.compressed_size > 0xFFFFFFFF; + let header_start = file.header_start > 0xFFFFFFFF; + if uncompressed_size { + size += 8; + } + if compressed_size { + size += 8; + } + if header_start { + size += 8; + } + if size > 0 { + writer.write_u16::(0x0001)?; + writer.write_u16::(size)?; + size += 4; + + if uncompressed_size { + writer.write_u64::(file.uncompressed_size)?; + } + if compressed_size { + writer.write_u64::(file.compressed_size)?; + } + if header_start { + writer.write_u64::(file.header_start)?; + } + // Excluded fields: + // u32: disk start number + } + Ok(size) +} + fn path_to_string(path: &std::path::Path) -> String { let mut path_str = String::new(); for component in path.components() { @@ -852,6 +1075,7 @@ mod test { compression_method: CompressionMethod::Stored, last_modified_time: DateTime::default(), permissions: Some(33188), + large_file: false, }; writer.start_file("mimetype", options).unwrap(); writer diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs index 0821433f..1b19d118 100644 --- a/tests/end_to_end.rs +++ b/tests/end_to_end.rs @@ -30,7 +30,7 @@ fn write_to_zip_file(file: &mut Cursor>) -> zip::result::ZipResult<()> { zip.write_all(b"Hello, World!\n")?; zip.start_file_with_extra_data("test_with_extra_data/🐢.txt", options)?; - zip.write_u16::(0)?; + zip.write_u16::(0xbeef)?; zip.write_u16::(EXTRA_DATA.len() as u16)?; zip.write_all(EXTRA_DATA)?; zip.end_extra_data()?; @@ -59,7 +59,7 @@ fn read_zip_file(zip_file: &mut Cursor>) -> zip::result::ZipResult(0)?; + extra_data.write_u16::(0xbeef)?; extra_data.write_u16::(EXTRA_DATA.len() as u16)?; extra_data.write_all(EXTRA_DATA)?; assert_eq!(file_with_extra_data.extra_data(), extra_data.as_slice()); 
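To round off this patch, a hedged usage sketch of the new `large_file` option; the generic writer bound matches what `ZipWriter` requires, while the function and entry names are assumptions for illustration:

```rust
use std::io::{Seek, Write};
use zip::{write::FileOptions, CompressionMethod, ZipWriter};

/// Write an entry that may exceed 4 GiB. `large_file(true)` reserves the local
/// ZIP64 extra field up front; without it, the writer reports an I/O error as
/// soon as more than 0xFFFFFFFF bytes have been written to the entry.
fn write_huge<W: Write + Seek>(w: W) -> zip::result::ZipResult<()> {
    let mut zip = ZipWriter::new(w);
    let options = FileOptions::default()
        .compression_method(CompressionMethod::Stored)
        .large_file(true);
    zip.start_file("backup/huge.img", options)?;
    // ... stream the potentially > 4 GiB payload into `zip` here ...
    zip.finish()?;
    Ok(())
}
```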
From e9a1de4e1bb5509c408644e65ba72839f494d59c Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Sat, 3 Oct 2020 11:42:13 +0200 Subject: [PATCH 06/10] Fix marking parsed file as large. --- src/read.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/read.rs b/src/read.rs index ed9c5451..ea990e75 100644 --- a/src/read.rs +++ b/src/read.rs @@ -531,13 +531,13 @@ fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> { let mut len_left = len as i64; // Zip64 extended information extra field if kind == 0x0001 { - file.large_file = true; - if file.uncompressed_size == 0xFFFFFFFF { + file.large_file = true; file.uncompressed_size = reader.read_u64::()?; len_left -= 8; } if file.compressed_size == 0xFFFFFFFF { + file.large_file = true; file.compressed_size = reader.read_u64::()?; len_left -= 8; } From a74045654f88e909cde33ed03e43ebe69cadbc14 Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Wed, 7 Oct 2020 10:07:21 +0200 Subject: [PATCH 07/10] Close writer on error during write. --- src/write.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/write.rs b/src/write.rs index 9db89f04..ac769812 100644 --- a/src/write.rs +++ b/src/write.rs @@ -176,6 +176,7 @@ impl Write for ZipWriter { if self.stats.bytes_written > 0xFFFFFFFF && !self.files.last_mut().unwrap().large_file { + let _inner = mem::replace(&mut self.inner, GenericZipWriter::Closed); return Err(io::Error::new( io::ErrorKind::Other, "Large file option has not been set", From 291e9491d570d7ea3d6a7088ab4cd2696d5c889c Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Thu, 8 Oct 2020 10:11:52 +0200 Subject: [PATCH 08/10] Reserve all mapped extra data header IDs. --- src/write.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/write.rs b/src/write.rs index ac769812..6c77f18b 100644 --- a/src/write.rs +++ b/src/write.rs @@ -920,14 +920,13 @@ fn validate_extra_data(file: &ZipFileData) -> ZipResult<()> { #[cfg(not(feature = "unreserved"))] { - if kind <= 31 - || [0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690] - .iter() - .any(|&reserved| reserved == kind) - { + if kind <= 31 || EXTRA_FIELD_MAPPING.iter().any(|&mapped| mapped == kind) { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, - "Reserved extra data header ID", + format!( + "Extra data header ID {:#06} requires crate feature \"unreserved\"", + kind, + ), ))); } } @@ -1105,3 +1104,12 @@ mod test { assert_eq!(path_str, "windows/system32"); } } + +#[cfg(not(feature = "unreserved"))] +const EXTRA_FIELD_MAPPING: [u16; 49] = [ + 0x0001, 0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015, 0x0016, + 0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690, 0x07c8, 0x2605, + 0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41, 0x4d49, 0x4f4c, 0x5356, + 0x5455, 0x554e, 0x5855, 0x6375, 0x6542, 0x7075, 0x756e, 0x7855, 0xa11e, 0xa220, 0xfd4a, 0x9901, + 0x9902, +]; From 425c54cd5f1918a774f4c9ba255c889235af78e8 Mon Sep 17 00:00:00 2001 From: Rouven Spreckels Date: Wed, 18 Nov 2020 16:27:28 +0100 Subject: [PATCH 09/10] Fix updating local ZIP64 extra field. 
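Both the reservation check added above and the offset fix below lean on the extra field's TLV layout: each block is a little-endian u16 header ID, a u16 data length, then that many bytes of data. A standalone parsing sketch under that assumption (not the crate's internal API):

```rust
use byteorder::{LittleEndian, ReadBytesExt};
use std::io;

/// Split an extra field into its (header ID, data) blocks, rejecting blocks
/// whose declared length runs past the end of the field.
fn split_extra_field(mut data: &[u8]) -> io::Result<Vec<(u16, Vec<u8>)>> {
    let mut blocks = Vec::new();
    while !data.is_empty() {
        let id = data.read_u16::<LittleEndian>()?;
        let len = data.read_u16::<LittleEndian>()? as usize;
        if len > data.len() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "extra data size exceeds extra field",
            ));
        }
        let (body, rest) = data.split_at(len);
        blocks.push((id, body.to_vec()));
        data = rest;
    }
    Ok(blocks)
}
```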
---
 src/write.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/write.rs b/src/write.rs
index 71ad91a1..5291accc 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -1062,7 +1062,7 @@ fn update_local_zip64_extra_field<T: Write + io::Seek>(
     writer: &mut T,
     file: &ZipFileData,
 ) -> ZipResult<()> {
-    let zip64_extra_field = file.header_start + 30 + file.file_name_raw.len() as u64;
+    let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64;
     writer.seek(io::SeekFrom::Start(zip64_extra_field + 4))?;
     writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
     writer.write_u64::<LittleEndian>(file.compressed_size)?;

From 129d7301efea99d7b01e67cf48bc55891532188e Mon Sep 17 00:00:00 2001
From: Rouven Spreckels
Date: Sun, 7 Feb 2021 12:46:52 +0100
Subject: [PATCH 10/10] Note that `large_file()` requires ZIP64 support.

---
 src/write.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/write.rs b/src/write.rs
index 5291accc..f60a2eaf 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -151,8 +151,9 @@ impl FileOptions {
     /// Set whether the new file's compressed and uncompressed size is less than 4 GiB.
     ///
-    /// If set to `false` and the file exceeds the limit, an I/O error is thrown. If set to `true`
-    /// and the file does not exceed the limit, 20 B are wasted. The default is `false`.
+    /// If set to `false` and the file exceeds the limit, an I/O error is thrown. If set to `true`,
+    /// readers will require ZIP64 support and if the file does not exceed the limit, 20 B are
+    /// wasted. The default is `false`.
     pub fn large_file(mut self, large: bool) -> FileOptions {
         self.large_file = large;
         self