Merge branch 'master' into oldpr369
Signed-off-by: Chris Hennick <4961925+Pr0methean@users.noreply.github.com>
commit 40b0af9494
19 changed files with 828 additions and 290 deletions
.github/workflows/ci.yaml (vendored, 14 lines changed)

@@ -6,7 +6,6 @@ on:
- 'master'
push:
branches-ignore:
- 'release-plz-**'
- 'gh-readonly-queue/**'
workflow_dispatch:
merge_group:
@@ -22,6 +21,7 @@ jobs:
matrix:
os: [ubuntu-latest, macOS-latest, windows-latest]
rustalias: [stable, nightly, msrv]
feature_flag: ["--all-features", "--no-default-features", ""]
include:
- rustalias: stable
rust: stable
@@ -29,7 +29,7 @@ jobs:
rust: '1.70'
- rustalias: nightly
rust: nightly
name: 'Build and test: ${{ matrix.os }}, ${{ matrix.rustalias }}'
name: 'Build and test ${{ matrix.feature_flag }}: ${{ matrix.os }}, ${{ matrix.rustalias }}'
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
@@ -44,19 +44,13 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples
args: --all ${{ matrix.feature_flag }} --bins --examples

- name: Tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all

- name: Tests (no features)
uses: actions-rs/cargo@v1
with:
command: test
args: --all --no-default-features
args: --all ${{ matrix.feature_flag }}

style_and_docs:
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
@@ -23,15 +23,18 @@ time = { version = "0.3.36", default-features = false }

[dependencies]
aes = { version = "0.8.4", optional = true }
byteorder = "1.5.0"
bzip2 = { version = "0.4.4", optional = true }
chrono = { version = "0.4.38", optional = true }
constant_time_eq = { version = "0.3.0", optional = true }
crc32fast = "1.4.0"
displaydoc = { version = "0.2.4", default-features = false }
flate2 = { version = "1.0.28", default-features = false, optional = true }
indexmap = "2"
hmac = { version = "0.12.1", optional = true, features = ["reset"] }
num_enum = "0.7.2"
pbkdf2 = { version = "0.12.2", optional = true }
sha1 = { version = "0.10.6", optional = true }
thiserror = "1.0.48"
time = { workspace = true, optional = true, features = [
"std",
] }
@@ -85,3 +88,7 @@ harness = false
[[bench]]
name = "read_metadata"
harness = false

[[bench]]
name = "merge_archive"
harness = false
@@ -70,6 +70,7 @@ See the [examples directory](examples) for:
* How to extract a zip file.
* How to extract a single file from a zip.
* How to read a zip from the standard input.
* How to append a directory to an existing archive

Fuzzing
-------
benches/merge_archive.rs (new file, 120 lines)

@@ -0,0 +1,120 @@
use bencher::{benchmark_group, benchmark_main};

use std::io::{Cursor, Read, Seek, Write};

use bencher::Bencher;
use getrandom::getrandom;
use zip::{result::ZipResult, write::SimpleFileOptions, ZipArchive, ZipWriter};

fn generate_random_archive(
num_entries: usize,
entry_size: usize,
options: SimpleFileOptions,
) -> ZipResult<(usize, ZipArchive<Cursor<Vec<u8>>>)> {
let buf = Cursor::new(Vec::new());
let mut zip = ZipWriter::new(buf);

let mut bytes = vec![0u8; entry_size];
for i in 0..num_entries {
let name = format!("random{}.dat", i);
zip.start_file(name, options)?;
getrandom(&mut bytes).unwrap();
zip.write_all(&bytes)?;
}

let buf = zip.finish()?.into_inner();
let len = buf.len();

Ok((len, ZipArchive::new(Cursor::new(buf))?))
}

fn perform_merge<R: Read + Seek, W: Write + Seek>(
src: ZipArchive<R>,
mut target: ZipWriter<W>,
) -> ZipResult<ZipWriter<W>> {
target.merge_archive(src)?;
Ok(target)
}

fn perform_raw_copy_file<R: Read + Seek, W: Write + Seek>(
mut src: ZipArchive<R>,
mut target: ZipWriter<W>,
) -> ZipResult<ZipWriter<W>> {
for i in 0..src.len() {
let entry = src.by_index(i)?;
target.raw_copy_file(entry)?;
}
Ok(target)
}

const NUM_ENTRIES: usize = 100;
const ENTRY_SIZE: usize = 1024;

fn merge_archive_stored(bench: &mut Bencher) {
let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored);
let (len, src) = generate_random_archive(NUM_ENTRIES, ENTRY_SIZE, options).unwrap();

bench.bytes = len as u64;

bench.iter(|| {
let buf = Cursor::new(Vec::new());
let zip = ZipWriter::new(buf);
let mut zip = perform_merge(src.clone(), zip).unwrap();
let buf = zip.finish().unwrap().into_inner();
assert_eq!(buf.len(), len);
});
}

fn merge_archive_compressed(bench: &mut Bencher) {
let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Deflated);
let (len, src) = generate_random_archive(NUM_ENTRIES, ENTRY_SIZE, options).unwrap();

bench.bytes = len as u64;

bench.iter(|| {
let buf = Cursor::new(Vec::new());
let zip = ZipWriter::new(buf);
let mut zip = perform_merge(src.clone(), zip).unwrap();
let buf = zip.finish().unwrap().into_inner();
assert_eq!(buf.len(), len);
});
}

fn merge_archive_raw_copy_file_stored(bench: &mut Bencher) {
let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored);
let (len, src) = generate_random_archive(NUM_ENTRIES, ENTRY_SIZE, options).unwrap();

bench.bytes = len as u64;

bench.iter(|| {
let buf = Cursor::new(Vec::new());
let zip = ZipWriter::new(buf);
let mut zip = perform_raw_copy_file(src.clone(), zip).unwrap();
let buf = zip.finish().unwrap().into_inner();
assert_eq!(buf.len(), len);
});
}

fn merge_archive_raw_copy_file_compressed(bench: &mut Bencher) {
let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Deflated);
let (len, src) = generate_random_archive(NUM_ENTRIES, ENTRY_SIZE, options).unwrap();

bench.bytes = len as u64;

bench.iter(|| {
let buf = Cursor::new(Vec::new());
let zip = ZipWriter::new(buf);
let mut zip = perform_raw_copy_file(src.clone(), zip).unwrap();
let buf = zip.finish().unwrap().into_inner();
assert_eq!(buf.len(), len);
});
}

benchmark_group!(
benches,
merge_archive_stored,
merge_archive_compressed,
merge_archive_raw_copy_file_stored,
merge_archive_raw_copy_file_compressed,
);
benchmark_main!(benches);
examples/append.rs (new file, 63 lines)

@@ -0,0 +1,63 @@
use std::{
fs::{File, OpenOptions},
path::{Path, PathBuf},
str::FromStr,
};
use zip::write::SimpleFileOptions;

fn gather_files<'a, T: Into<&'a Path>>(path: T, files: &mut Vec<PathBuf>) {
let path: &Path = path.into();

for entry in path.read_dir().unwrap() {
match entry {
Ok(e) => {
if e.path().is_dir() {
gather_files(e.path().as_ref(), files);
} else if e.path().is_file() {
files.push(e.path());
}
}
Err(_) => todo!(),
}
}
}

fn real_main() -> i32 {
let args: Vec<_> = std::env::args().collect();
if args.len() < 3 {
println!("Usage: {} <existing archive> <folder_to_append>", args[0]);
return 1;
}

let existing_archive_path = &*args[1];
let append_dir_path = &*args[2];
let archive = PathBuf::from_str(existing_archive_path).unwrap();
let to_append = PathBuf::from_str(append_dir_path).unwrap();

let existing_zip = OpenOptions::new()
.read(true)
.write(true)
.open(archive)
.unwrap();
let mut append_zip = zip::ZipWriter::new_append(existing_zip).unwrap();

let mut files: Vec<PathBuf> = vec![];
gather_files(to_append.as_ref(), &mut files);

for file in files {
append_zip
.start_file(file.to_string_lossy(), SimpleFileOptions::default())
.unwrap();

let mut f = File::open(file).unwrap();
let _ = std::io::copy(&mut f, &mut append_zip);
}

append_zip.finish().unwrap();

0
}

fn main() {
std::process::exit(real_main());
}
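Once the crate is built, this example would be invoked along the lines of `cargo run --example append -- existing.zip folder_to_append`, where the archive and folder names are hypothetical placeholders.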
@@ -4,9 +4,9 @@
//! different byte order (little endian) than NIST (big endian).
//! See [AesCtrZipKeyStream] for more information.

use crate::unstable::LittleEndianWriteExt;
use aes::cipher::generic_array::GenericArray;
use aes::cipher::{BlockEncrypt, KeyInit};
use byteorder::WriteBytesExt;
use std::{any, fmt};

/// Internal block size of an AES cipher.
@@ -112,7 +112,7 @@ where
// Note: AES block size is always 16 bytes, same as u128.
self.buffer
.as_mut()
.write_u128::<byteorder::LittleEndian>(self.counter)
.write_u128_le(self.counter)
.expect("did not expect u128 le conversion to fail");
self.cipher
.encrypt_block(GenericArray::from_mut_slice(&mut self.buffer));
@@ -154,7 +154,7 @@ mod tests {

/// Checks whether `crypt_in_place` produces the correct plaintext after one use and yields the
/// cipertext again after applying it again.
fn roundtrip<Aes>(key: &[u8], ciphertext: &mut [u8], expected_plaintext: &[u8])
fn roundtrip<Aes>(key: &[u8], ciphertext: &[u8], expected_plaintext: &[u8])
where
Aes: AesKind,
Aes::Cipher: KeyInit + BlockEncrypt,
@@ -182,7 +182,7 @@ mod tests {
// `7z a -phelloworld -mem=AES256 -mx=0 aes256_40byte.zip 40byte_data.txt`
#[test]
fn crypt_aes_256_0_byte() {
let mut ciphertext = [];
let ciphertext = [];
let expected_plaintext = &[];
let key = [
0x0b, 0xec, 0x2e, 0xf2, 0x46, 0xf0, 0x7e, 0x35, 0x16, 0x54, 0xe0, 0x98, 0x10, 0xb3,
@@ -190,36 +190,36 @@ mod tests {
0x5c, 0xd0, 0xc0, 0x54,
];

roundtrip::<Aes256>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes256>(&key, &ciphertext, expected_plaintext);
}

#[test]
fn crypt_aes_128_5_byte() {
let mut ciphertext = [0x98, 0xa9, 0x8c, 0x26, 0x0e];
let ciphertext = [0x98, 0xa9, 0x8c, 0x26, 0x0e];
let expected_plaintext = b"asdf\n";
let key = [
0xe0, 0x25, 0x7b, 0x57, 0x97, 0x6a, 0xa4, 0x23, 0xab, 0x94, 0xaa, 0x44, 0xfd, 0x47,
0x4f, 0xa5,
];

roundtrip::<Aes128>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes128>(&key, &ciphertext, expected_plaintext);
}

#[test]
fn crypt_aes_192_5_byte() {
let mut ciphertext = [0x36, 0x55, 0x5c, 0x61, 0x3c];
let ciphertext = [0x36, 0x55, 0x5c, 0x61, 0x3c];
let expected_plaintext = b"asdf\n";
let key = [
0xe4, 0x4a, 0x88, 0x52, 0x8f, 0xf7, 0x0b, 0x81, 0x7b, 0x75, 0xf1, 0x74, 0x21, 0x37,
0x8c, 0x90, 0xad, 0xbe, 0x4a, 0x65, 0xa8, 0x96, 0x0e, 0xcc,
];

roundtrip::<Aes192>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes192>(&key, &ciphertext, expected_plaintext);
}

#[test]
fn crypt_aes_256_5_byte() {
let mut ciphertext = [0xc2, 0x47, 0xc0, 0xdc, 0x56];
let ciphertext = [0xc2, 0x47, 0xc0, 0xdc, 0x56];
let expected_plaintext = b"asdf\n";
let key = [
0x79, 0x5e, 0x17, 0xf2, 0xc6, 0x3d, 0x28, 0x9b, 0x4b, 0x4b, 0xbb, 0xa9, 0xba, 0xc9,
@@ -227,12 +227,12 @@ mod tests {
0x15, 0xb2, 0x86, 0xab,
];

roundtrip::<Aes256>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes256>(&key, &ciphertext, expected_plaintext);
}

#[test]
fn crypt_aes_128_40_byte() {
let mut ciphertext = [
let ciphertext = [
0xcf, 0x72, 0x6b, 0xa1, 0xb2, 0x0f, 0xdf, 0xaa, 0x10, 0xad, 0x9c, 0x7f, 0x6d, 0x1c,
0x8d, 0xb5, 0x16, 0x7e, 0xbb, 0x11, 0x69, 0x52, 0x8c, 0x89, 0x80, 0x32, 0xaa, 0x76,
0xa6, 0x18, 0x31, 0x98, 0xee, 0xdd, 0x22, 0x68, 0xb7, 0xe6, 0x77, 0xd2,
@@ -243,12 +243,12 @@ mod tests {
0x81, 0xb6,
];

roundtrip::<Aes128>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes128>(&key, &ciphertext, expected_plaintext);
}

#[test]
fn crypt_aes_192_40_byte() {
let mut ciphertext = [
let ciphertext = [
0xa6, 0xfc, 0x52, 0x79, 0x2c, 0x6c, 0xfe, 0x68, 0xb1, 0xa8, 0xb3, 0x07, 0x52, 0x8b,
0x82, 0xa6, 0x87, 0x9c, 0x72, 0x42, 0x3a, 0xf8, 0xc6, 0xa9, 0xc9, 0xfb, 0x61, 0x19,
0x37, 0xb9, 0x56, 0x62, 0xf4, 0xfc, 0x5e, 0x7a, 0xdd, 0x55, 0x0a, 0x48,
@@ -259,12 +259,12 @@ mod tests {
0xfe, 0xae, 0x1b, 0xba, 0x01, 0x97, 0x97, 0x79, 0xbb, 0xa6,
];

roundtrip::<Aes192>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes192>(&key, &ciphertext, expected_plaintext);
}

#[test]
fn crypt_aes_256_40_byte() {
let mut ciphertext = [
let ciphertext = [
0xa9, 0x99, 0xbd, 0xea, 0x82, 0x9b, 0x8f, 0x2f, 0xb7, 0x52, 0x2f, 0x6b, 0xd8, 0xf6,
0xab, 0x0e, 0x24, 0x51, 0x9e, 0x18, 0x0f, 0xc0, 0x8f, 0x54, 0x15, 0x80, 0xae, 0xbc,
0xa0, 0x5c, 0x8a, 0x11, 0x8d, 0x14, 0x7e, 0xc5, 0xb4, 0xae, 0xd3, 0x37,
@@ -276,6 +276,6 @@ mod tests {
0xc2, 0x07, 0x36, 0xb6,
];

roundtrip::<Aes256>(&key, &mut ciphertext, expected_plaintext);
roundtrip::<Aes256>(&key, &ciphertext, expected_plaintext);
}
}
src/extra_fields/extended_timestamp.rs (new file, 87 lines)

@@ -0,0 +1,87 @@
use crate::result::{ZipError, ZipResult};
use crate::unstable::LittleEndianReadExt;
use std::io::Read;

/// extended timestamp, as described in <https://libzip.org/specifications/extrafld.txt>

#[derive(Debug, Clone)]
pub struct ExtendedTimestamp {
mod_time: Option<u32>,
ac_time: Option<u32>,
cr_time: Option<u32>,
}

impl ExtendedTimestamp {
/// creates an extended timestamp struct by reading the required bytes from the reader.
///
/// This method assumes that the length has already been read, therefore
/// it must be passed as an argument
pub fn try_from_reader<R>(reader: &mut R, len: u16) -> ZipResult<Self>
where
R: Read,
{
let mut flags = [0u8];
reader.read_exact(&mut flags)?;
let flags = flags[0];

// the `flags` field refers to the local headers and might not correspond
// to the len field. If the length field is 1+4, we assume that only
// the modification time has been set

// > Those times that are present will appear in the order indicated, but
// > any combination of times may be omitted. (Creation time may be
// > present without access time, for example.) TSize should equal
// > (1 + 4*(number of set bits in Flags)), as the block is currently
// > defined.
if len != 5 && len as u32 != 1 + 4 * flags.count_ones() {
//panic!("found len {len} and flags {flags:08b}");
return Err(ZipError::UnsupportedArchive(
"flags and len don't match in extended timestamp field",
));
}

if flags & 0b11111000 != 0 {
return Err(ZipError::UnsupportedArchive(
"found unsupported timestamps in the extended timestamp header",
));
}

let mod_time = if (flags & 0b00000001u8 == 0b00000001u8) || len == 5 {
Some(reader.read_u32_le()?)
} else {
None
};

let ac_time = if flags & 0b00000010u8 == 0b00000010u8 && len > 5 {
Some(reader.read_u32_le()?)
} else {
None
};

let cr_time = if flags & 0b00000100u8 == 0b00000100u8 && len > 5 {
Some(reader.read_u32_le()?)
} else {
None
};
Ok(Self {
mod_time,
ac_time,
cr_time,
})
}

/// returns the last modification timestamp
pub fn mod_time(&self) -> Option<&u32> {
self.mod_time.as_ref()
}

/// returns the last access timestamp
pub fn ac_time(&self) -> Option<&u32> {
self.ac_time.as_ref()
}

/// returns the creation timestamp
pub fn cr_time(&self) -> Option<&u32> {
self.cr_time.as_ref()
}
}
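A quick worked check of the length rule enforced above, using made-up flag values rather than anything from this commit: TSize must equal 1 + 4 * (number of set bits in the flags byte), and len == 5 is additionally accepted as "modification time only".

    // Hypothetical flags byte: modification-time and access-time bits set.
    let flags: u8 = 0b0000_0011;
    let expected_len = 1 + 4 * flags.count_ones(); // 1 flag byte + two 4-byte timestamps
    assert_eq!(expected_len, 9);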
src/extra_fields/mod.rs (new file, 28 lines)

@@ -0,0 +1,28 @@
//! types for extra fields

/// marker trait to denote the place where this extra field has been stored
pub trait ExtraFieldVersion {}

/// use this to mark extra fields specified in a local header

#[derive(Debug, Clone)]
pub struct LocalHeaderVersion;

/// use this to mark extra fields specified in the central header

#[derive(Debug, Clone)]
pub struct CentralHeaderVersion;

impl ExtraFieldVersion for LocalHeaderVersion {}
impl ExtraFieldVersion for CentralHeaderVersion {}

mod extended_timestamp;

pub use extended_timestamp::*;

/// contains one extra field
#[derive(Debug, Clone)]
pub enum ExtraField {
/// extended timestamp, as described in <https://libzip.org/specifications/extrafld.txt>
ExtendedTimestamp(ExtendedTimestamp),
}
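A sketch of how the new `ExtraField` enum might be consumed through `ZipFile::extra_data_fields()` (added in src/read.rs later in this diff); the archive path is a hypothetical placeholder:

    use std::fs::File;
    use zip::{ExtraField, ZipArchive};

    fn print_mod_times(path: &str) -> zip::result::ZipResult<()> {
        let mut archive = ZipArchive::new(File::open(path)?)?;
        for i in 0..archive.len() {
            let file = archive.by_index(i)?;
            // Extra fields parsed from the entry's central directory record.
            for field in file.extra_data_fields() {
                let ExtraField::ExtendedTimestamp(ts) = field;
                println!("{}: mtime = {:?}", file.name(), ts.mod_time());
            }
        }
        Ok(())
    }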
@@ -40,12 +40,14 @@ mod aes_ctr;
mod compression;
mod cp437;
mod crc32;
pub mod extra_fields;
pub mod read;
pub mod result;
mod spec;
mod types;
pub mod write;
mod zipcrypto;
pub use extra_fields::ExtraField;

#[doc = "Unstable APIs\n\
\
src/read.rs (259 lines changed)

@@ -5,12 +5,13 @@ use crate::aes::{AesReader, AesReaderValid};
use crate::compression::CompressionMethod;
use crate::cp437::FromCp437;
use crate::crc32::Crc32Reader;
use crate::extra_fields::{ExtendedTimestamp, ExtraField};
use crate::read::zip_archive::Shared;
use crate::result::{ZipError, ZipResult};
use crate::spec;
use crate::types::{AesMode, AesVendorVersion, DateTime, System, ZipFileData};
use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator};
use byteorder::{LittleEndian, ReadBytesExt};
use indexmap::IndexMap;
use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::io::{self, prelude::*, SeekFrom};
@@ -47,11 +48,9 @@ pub(crate) mod zip_archive {
/// Extract immutable data from `ZipArchive` to make it cheap to clone
#[derive(Debug)]
pub(crate) struct Shared {
pub(crate) files: Box<[super::ZipFileData]>,
pub(crate) names_map: super::HashMap<Box<str>, usize>,
pub(crate) files: super::IndexMap<Box<str>, super::ZipFileData>,
pub(super) offset: u64,
pub(super) dir_start: u64,
pub(super) dir_end: u64,
}

/// ZIP archive reader
@@ -87,6 +86,7 @@ pub(crate) mod zip_archive {
use crate::read::lzma::LzmaDecoder;
use crate::result::ZipError::InvalidPassword;
use crate::spec::path_to_string;
use crate::unstable::LittleEndianReadExt;
pub use zip_archive::ZipArchive;

#[allow(clippy::large_enum_variant)]
@@ -269,15 +269,15 @@ pub(crate) fn find_content<'a>(
) -> ZipResult<io::Take<&'a mut dyn Read>> {
// Parse local header
reader.seek(io::SeekFrom::Start(data.header_start))?;
let signature = reader.read_u32::<LittleEndian>()?;
let signature = reader.read_u32_le()?;
if signature != spec::LOCAL_FILE_HEADER_SIGNATURE {
return Err(ZipError::InvalidArchive("Invalid local file header"));
}
let data_start = match data.data_start.get() {
None => {
reader.seek(io::SeekFrom::Current(22))?;
let file_name_length = reader.read_u16::<LittleEndian>()? as u64;
let extra_field_length = reader.read_u16::<LittleEndian>()? as u64;
let file_name_length = reader.read_u16_le()? as u64;
let extra_field_length = reader.read_u16_le()? as u64;
let magic_and_header = 4 + 22 + 2 + 2;
let data_start =
data.header_start + magic_and_header + file_name_length + extra_field_length;
@@ -414,7 +414,96 @@ pub(crate) struct CentralDirectoryInfo {
pub(crate) disk_with_central_directory: u32,
}

impl<R> ZipArchive<R> {
pub(crate) fn from_finalized_writer(
files: IndexMap<Box<str>, ZipFileData>,
comment: Vec<u8>,
reader: R,
central_start: u64,
) -> ZipResult<Self> {
if files.is_empty() {
return Err(ZipError::InvalidArchive(
"attempt to finalize empty zip writer into readable",
));
}
/* This is where the whole file starts. */
let (_, first_header) = files.first().unwrap();
let initial_offset = first_header.header_start;
let shared = Arc::new(zip_archive::Shared {
files,
offset: initial_offset,
dir_start: central_start,
});
Ok(Self {
reader,
shared,
comment: comment.into_boxed_slice().into(),
})
}
}

impl<R: Read + Seek> ZipArchive<R> {
pub(crate) fn merge_contents<W: Write + io::Seek>(
&mut self,
mut w: W,
) -> ZipResult<IndexMap<Box<str>, ZipFileData>> {
let mut new_files = self.shared.files.clone();
if new_files.is_empty() {
return Ok(IndexMap::new());
}
/* The first file header will probably start at the beginning of the file, but zip doesn't
* enforce that, and executable zips like PEX files will have a shebang line so will
* definitely be greater than 0.
*
* assert_eq!(0, new_files[0].header_start); // Avoid this.
*/

let new_initial_header_start = w.stream_position()?;
/* Push back file header starts for all entries in the covered files. */
new_files.values_mut().try_for_each(|f| {
/* This is probably the only really important thing to change. */
f.header_start = f.header_start.checked_add(new_initial_header_start).ok_or(
ZipError::InvalidArchive("new header start from merge would have been too large"),
)?;
/* This is only ever used internally to cache metadata lookups (it's not part of the
* zip spec), and 0 is the sentinel value. */
f.central_header_start = 0;
/* This is an atomic variable so it can be updated from another thread in the
* implementation (which is good!). */
if let Some(old_data_start) = f.data_start.take() {
let new_data_start = old_data_start.checked_add(new_initial_header_start).ok_or(
ZipError::InvalidArchive("new data start from merge would have been too large"),
)?;
f.data_start.get_or_init(|| new_data_start);
}
Ok::<_, ZipError>(())
})?;

/* Rewind to the beginning of the file.
*
* NB: we *could* decide to start copying from new_files[0].header_start instead, which
* would avoid copying over e.g. any pex shebangs or other file contents that start before
* the first zip file entry. However, zip files actually shouldn't care about garbage data
* in *between* real entries, since the central directory header records the correct start
* location of each, and keeping track of that math is more complicated logic that will only
* rarely be used, since most zips that get merged together are likely to be produced
* specifically for that purpose (and therefore are unlikely to have a shebang or other
* preface). Finally, this preserves any data that might actually be useful.
*/
self.reader.rewind()?;
/* Find the end of the file data. */
let length_to_read = self.shared.dir_start;
/* Produce a Read that reads bytes up until the start of the central directory header.
* This "as &mut dyn Read" trick is used elsewhere to avoid having to clone the underlying
* handle, which it really shouldn't need to anyway. */
let mut limited_raw = (&mut self.reader as &mut dyn Read).take(length_to_read);
/* Copy over file data from source archive directly. */
io::copy(&mut limited_raw, &mut w)?;

/* Return the files we've just written to the data stream. */
Ok(new_files)
}

fn get_directory_info_zip32(
footer: &spec::CentralDirectoryEnd,
cde_start_pos: u64,
@@ -565,29 +654,25 @@ impl<R: Read + Seek> ZipArchive<R> {
result.and_then(|dir_info| {
// If the parsed number of files is greater than the offset then
// something fishy is going on and we shouldn't trust number_of_files.
let file_capacity = if dir_info.number_of_files > cde_start_pos as usize {
0
} else {
dir_info.number_of_files
};
let mut files = Vec::with_capacity(file_capacity);
let mut names_map = HashMap::with_capacity(file_capacity);
let file_capacity =
if dir_info.number_of_files > dir_info.directory_start as usize {
0
} else {
dir_info.number_of_files
};
let mut files = IndexMap::with_capacity(file_capacity);
reader.seek(io::SeekFrom::Start(dir_info.directory_start))?;
for _ in 0..dir_info.number_of_files {
let file = central_header_to_zip_file(reader, dir_info.archive_offset)?;
names_map.insert(file.file_name.clone(), files.len());
files.push(file);
files.insert(file.file_name.clone(), file);
}
let dir_end = reader.seek(io::SeekFrom::Start(dir_info.directory_start))?;
if dir_info.disk_number != dir_info.disk_with_central_directory {
unsupported_zip_error("Support for multi-disk files is not implemented")
} else {
Ok(Shared {
files: files.into(),
names_map,
files,
offset: dir_info.archive_offset,
dir_start: dir_info.directory_start,
dir_end,
})
}
})
@@ -607,7 +692,7 @@ impl<R: Read + Seek> ZipArchive<R> {
}
let shared = ok_results
.into_iter()
.max_by_key(|shared| shared.dir_end)
.max_by_key(|shared| shared.dir_start)
.unwrap();
reader.seek(io::SeekFrom::Start(shared.dir_start))?;
Ok(shared)
@@ -689,7 +774,7 @@ impl<R: Read + Seek> ZipArchive<R> {

/// Returns an iterator over all the file and directory names in this archive.
pub fn file_names(&self) -> impl Iterator<Item = &str> {
self.shared.names_map.keys().map(Box::borrow)
self.shared.files.keys().map(|s| s.as_ref())
}

/// Search for a file entry by name, decrypt with given password
@@ -717,7 +802,7 @@ impl<R: Read + Seek> ZipArchive<R> {
/// Get the index of a file entry by name, if it's present.
#[inline(always)]
pub fn index_for_name(&self, name: &str) -> Option<usize> {
self.shared.names_map.get(name).copied()
self.shared.files.get_index_of(name)
}

/// Get the index of a file entry by path, if it's present.
@@ -731,8 +816,8 @@ impl<R: Read + Seek> ZipArchive<R> {
pub fn name_for_index(&self, index: usize) -> Option<&str> {
self.shared
.files
.get(index)
.map(|file_data| &*file_data.file_name)
.get_index(index)
.map(|(name, _)| name.as_ref())
}

/// Search for a file entry by name and return a seekable object.
@@ -774,7 +859,7 @@ impl<R: Read + Seek> ZipArchive<R> {
name: &str,
password: Option<&[u8]>,
) -> ZipResult<ZipFile<'a>> {
let Some(index) = self.index_for_name(name) else {
let Some(index) = self.shared.files.get_index_of(name) else {
return Err(ZipError::FileNotFound);
};
self.by_index_with_optional_password(index, password)
@@ -809,17 +894,16 @@ impl<R: Read + Seek> ZipArchive<R> {
/// Get a contained file by index without decompressing it
pub fn by_index_raw(&mut self, file_number: usize) -> ZipResult<ZipFile<'_>> {
let reader = &mut self.reader;
self.shared
let (_, data) = self
.shared
.files
.get(file_number)
.ok_or(ZipError::FileNotFound)
.and_then(move |data| {
Ok(ZipFile {
crypto_reader: None,
reader: ZipFileReader::Raw(find_content(data, reader)?),
data: Cow::Borrowed(data),
})
})
.get_index(file_number)
.ok_or(ZipError::FileNotFound)?;
Ok(ZipFile {
crypto_reader: None,
reader: ZipFileReader::Raw(find_content(data, reader)?),
data: Cow::Borrowed(data),
})
}

fn by_index_with_optional_password(
@@ -827,10 +911,10 @@ impl<R: Read + Seek> ZipArchive<R> {
file_number: usize,
mut password: Option<&[u8]>,
) -> ZipResult<ZipFile<'_>> {
let data = self
let (_, data) = self
.shared
.files
.get(file_number)
.get_index(file_number)
.ok_or(ZipError::FileNotFound)?;

match (password, data.encrypted) {
@@ -878,7 +962,7 @@ pub(crate) fn central_header_to_zip_file<R: Read + Seek>(
let central_header_start = reader.stream_position()?;

// Parse central header
let signature = reader.read_u32::<LittleEndian>()?;
let signature = reader.read_u32_le()?;
if signature != spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE {
Err(ZipError::InvalidArchive("Invalid Central Directory header"))
} else {
@@ -892,25 +976,25 @@ fn central_header_to_zip_file_inner<R: Read>(
archive_offset: u64,
central_header_start: u64,
) -> ZipResult<ZipFileData> {
let version_made_by = reader.read_u16::<LittleEndian>()?;
let _version_to_extract = reader.read_u16::<LittleEndian>()?;
let flags = reader.read_u16::<LittleEndian>()?;
let version_made_by = reader.read_u16_le()?;
let _version_to_extract = reader.read_u16_le()?;
let flags = reader.read_u16_le()?;
let encrypted = flags & 1 == 1;
let is_utf8 = flags & (1 << 11) != 0;
let using_data_descriptor = flags & (1 << 3) != 0;
let compression_method = reader.read_u16::<LittleEndian>()?;
let last_mod_time = reader.read_u16::<LittleEndian>()?;
let last_mod_date = reader.read_u16::<LittleEndian>()?;
let crc32 = reader.read_u32::<LittleEndian>()?;
let compressed_size = reader.read_u32::<LittleEndian>()?;
let uncompressed_size = reader.read_u32::<LittleEndian>()?;
let file_name_length = reader.read_u16::<LittleEndian>()? as usize;
let extra_field_length = reader.read_u16::<LittleEndian>()? as usize;
let file_comment_length = reader.read_u16::<LittleEndian>()? as usize;
let _disk_number = reader.read_u16::<LittleEndian>()?;
let _internal_file_attributes = reader.read_u16::<LittleEndian>()?;
let external_file_attributes = reader.read_u32::<LittleEndian>()?;
let offset = reader.read_u32::<LittleEndian>()? as u64;
let compression_method = reader.read_u16_le()?;
let last_mod_time = reader.read_u16_le()?;
let last_mod_date = reader.read_u16_le()?;
let crc32 = reader.read_u32_le()?;
let compressed_size = reader.read_u32_le()?;
let uncompressed_size = reader.read_u32_le()?;
let file_name_length = reader.read_u16_le()? as usize;
let extra_field_length = reader.read_u16_le()? as usize;
let file_comment_length = reader.read_u16_le()? as usize;
let _disk_number = reader.read_u16_le()?;
let _internal_file_attributes = reader.read_u16_le()?;
let external_file_attributes = reader.read_u32_le()?;
let offset = reader.read_u32_le()? as u64;
let mut file_name_raw = vec![0; file_name_length];
reader.read_exact(&mut file_name_raw)?;
let mut extra_field = vec![0; extra_field_length];
@@ -929,7 +1013,7 @@ fn central_header_to_zip_file_inner<R: Read>(

// Construct the result
let mut result = ZipFileData {
system: System::from_u8((version_made_by >> 8) as u8),
system: System::from((version_made_by >> 8) as u8),
version_made_by: version_made_by as u8,
encrypted,
using_data_descriptor,
@@ -953,6 +1037,7 @@ fn central_header_to_zip_file_inner<R: Read>(
external_attributes: external_file_attributes,
large_file: false,
aes_mode: None,
extra_fields: Vec::new(),
};

match parse_extra_field(&mut result) {
@@ -983,24 +1068,24 @@ fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> {
let mut reader = io::Cursor::new(extra_field.as_ref());

while (reader.position() as usize) < extra_field.len() {
let kind = reader.read_u16::<LittleEndian>()?;
let len = reader.read_u16::<LittleEndian>()?;
let kind = reader.read_u16_le()?;
let len = reader.read_u16_le()?;
let mut len_left = len as i64;
match kind {
// Zip64 extended information extra field
0x0001 => {
if file.uncompressed_size == spec::ZIP64_BYTES_THR {
file.large_file = true;
file.uncompressed_size = reader.read_u64::<LittleEndian>()?;
file.uncompressed_size = reader.read_u64_le()?;
len_left -= 8;
}
if file.compressed_size == spec::ZIP64_BYTES_THR {
file.large_file = true;
file.compressed_size = reader.read_u64::<LittleEndian>()?;
file.compressed_size = reader.read_u64_le()?;
len_left -= 8;
}
if file.header_start == spec::ZIP64_BYTES_THR {
file.header_start = reader.read_u64::<LittleEndian>()?;
file.header_start = reader.read_u64_le()?;
len_left -= 8;
}
}
@@ -1011,10 +1096,12 @@ fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> {
"AES extra data field has an unsupported length",
));
}
let vendor_version = reader.read_u16::<LittleEndian>()?;
let vendor_id = reader.read_u16::<LittleEndian>()?;
let aes_mode = reader.read_u8()?;
let compression_method = reader.read_u16::<LittleEndian>()?;
let vendor_version = reader.read_u16_le()?;
let vendor_id = reader.read_u16_le()?;
let mut out = [0u8];
reader.read_exact(&mut out)?;
let aes_mode = out[0];
let compression_method = reader.read_u16_le()?;

if vendor_id != 0x4541 {
return Err(ZipError::InvalidArchive("Invalid AES vendor"));
@@ -1035,6 +1122,17 @@ fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<()> {
CompressionMethod::from_u16(compression_method)
};
}
0x5455 => {
// extended timestamp
// https://libzip.org/specifications/extrafld.txt

file.extra_fields.push(ExtraField::ExtendedTimestamp(
ExtendedTimestamp::try_from_reader(&mut reader, len)?,
));

// the reader for ExtendedTimestamp consumes `len` bytes
len_left = 0;
}
_ => {
// Other fields are ignored
}
@@ -1216,6 +1314,11 @@ impl <'a> HasZipMetadata for ZipFile<'a> {
fn get_metadata(&self) -> &ZipFileData {
self.data.as_ref()
}

/// iterate through all extra fields
pub fn extra_data_fields(&self) -> impl Iterator<Item = &ExtraField> {
self.data.extra_fields.iter()
}
}

impl<'a> Read for ZipFile<'a> {
@@ -1265,6 +1368,7 @@ impl<'a> Drop for ZipFile<'a> {
}
};

#[allow(clippy::unused_io_amount)]
loop {
match reader.read(&mut buffer) {
Ok(0) => break,
@@ -1295,7 +1399,7 @@ impl<'a> Drop for ZipFile<'a> {
/// * `data_start`: set to 0
/// * `external_attributes`: `unix_mode()`: will return None
pub fn read_zipfile_from_stream<'a, R: Read>(reader: &'a mut R) -> ZipResult<Option<ZipFile<'_>>> {
let signature = reader.read_u32::<LittleEndian>()?;
let signature = reader.read_u32_le()?;

match signature {
spec::LOCAL_FILE_HEADER_SIGNATURE => (),
@@ -1303,20 +1407,20 @@ pub fn read_zipfile_from_stream<'a, R: Read>(reader: &'a mut R) -> ZipResult<Opt
_ => return Err(ZipError::InvalidArchive("Invalid local file header")),
}

let version_made_by = reader.read_u16::<LittleEndian>()?;
let flags = reader.read_u16::<LittleEndian>()?;
let version_made_by = reader.read_u16_le()?;
let flags = reader.read_u16_le()?;
let encrypted = flags & 1 == 1;
let is_utf8 = flags & (1 << 11) != 0;
let using_data_descriptor = flags & (1 << 3) != 0;
#[allow(deprecated)]
let compression_method = CompressionMethod::from_u16(reader.read_u16::<LittleEndian>()?);
let last_mod_time = reader.read_u16::<LittleEndian>()?;
let last_mod_date = reader.read_u16::<LittleEndian>()?;
let crc32 = reader.read_u32::<LittleEndian>()?;
let compressed_size = reader.read_u32::<LittleEndian>()?;
let uncompressed_size = reader.read_u32::<LittleEndian>()?;
let file_name_length = reader.read_u16::<LittleEndian>()? as usize;
let extra_field_length = reader.read_u16::<LittleEndian>()? as usize;
let compression_method = CompressionMethod::from_u16(reader.read_u16_le()?);
let last_mod_time = reader.read_u16_le()?;
let last_mod_date = reader.read_u16_le()?;
let crc32 = reader.read_u32_le()?;
let compressed_size = reader.read_u32_le()?;
let uncompressed_size = reader.read_u32_le()?;
let file_name_length = reader.read_u16_le()? as usize;
let extra_field_length = reader.read_u16_le()? as usize;

let mut file_name_raw = vec![0; file_name_length];
reader.read_exact(&mut file_name_raw)?;
@@ -1329,7 +1433,7 @@ pub fn read_zipfile_from_stream<'a, R: Read>(reader: &'a mut R) -> ZipResult<Opt
};

let mut result = ZipFileData {
system: System::from_u8((version_made_by >> 8) as u8),
system: System::from((version_made_by >> 8) as u8),
version_made_by: version_made_by as u8,
encrypted,
using_data_descriptor,
@@ -1355,6 +1459,7 @@ pub fn read_zipfile_from_stream<'a, R: Read>(reader: &'a mut R) -> ZipResult<Opt
external_attributes: 0,
large_file: false,
aes_mode: None,
extra_fields: Vec::new(),
};

match parse_extra_field(&mut result) {
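The `merge_contents` plumbing above is what backs the public `ZipWriter::merge_archive` call exercised by benches/merge_archive.rs earlier in this diff. A minimal sketch of the round trip, using in-memory archives invented for illustration:

    use std::io::{Cursor, Write};
    use zip::{write::SimpleFileOptions, ZipWriter};

    fn demo() -> zip::result::ZipResult<()> {
        // Build a small source archive in memory.
        let mut src = ZipWriter::new(Cursor::new(Vec::new()));
        src.start_file("src.txt", SimpleFileOptions::default())?;
        src.write_all(b"from the source archive\n")?;
        let src = src.finish_into_readable()?;

        // Copy every entry of `src` into another writer without recompressing,
        // then keep appending new entries afterwards.
        let mut dst = ZipWriter::new(Cursor::new(Vec::new()));
        dst.merge_archive(src)?;
        dst.start_file("new.txt", SimpleFileOptions::default())?;
        dst.write_all(b"written after the merge\n")?;

        let merged = dst.finish_into_readable()?;
        assert!(merged.index_for_name("src.txt").is_some());
        Ok(())
    }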
@@ -1,3 +1,4 @@
use crate::unstable::LittleEndianReadExt;
use std::fs;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
@@ -7,8 +8,6 @@ use super::{
ZipFileData, ZipResult, HasZipMetadata,
};

use byteorder::{LittleEndian, ReadBytesExt};

/// Stream decoder for zip.
#[derive(Debug)]
pub struct ZipStreamReader<R>(R);
@@ -28,7 +27,7 @@ impl<R: Read> ZipStreamReader<R> {
let central_header_start = 0;

// Parse central header
let signature = self.0.read_u32::<LittleEndian>()?;
let signature = self.0.read_u32_le()?;
if signature != spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE {
Ok(None)
} else {
src/result.rs (51 lines changed, file mode changed from normal to executable)

@@ -1,67 +1,38 @@
#![allow(unknown_lints)] // non_local_definitions isn't in Rust 1.70
#![allow(non_local_definitions)]
//! Error types that can be emitted from this library

use displaydoc::Display;
use thiserror::Error;

use std::error::Error;
use std::fmt;
use std::io;
use std::io::IntoInnerError;
use std::num::TryFromIntError;

/// Generic result type with ZipError as its error variant
pub type ZipResult<T> = Result<T, ZipError>;

/// Error type for Zip
#[derive(Debug)]
#[derive(Debug, Display, Error)]
#[non_exhaustive]
pub enum ZipError {
/// An Error caused by I/O
Io(io::Error),
/// i/o error: {0}
Io(#[from] io::Error),

/// This file is probably not a zip archive
/// invalid Zip archive: {0}
InvalidArchive(&'static str),

/// This archive is not supported
/// unsupported Zip archive: {0}
UnsupportedArchive(&'static str),

/// The requested file could not be found in the archive
/// specified file not found in archive
FileNotFound,

/// The password provided is incorrect
InvalidPassword,
}

impl From<io::Error> for ZipError {
fn from(err: io::Error) -> ZipError {
ZipError::Io(err)
}
}

impl<W> From<IntoInnerError<W>> for ZipError {
fn from(value: IntoInnerError<W>) -> Self {
ZipError::Io(value.into_error())
}
}

impl fmt::Display for ZipError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match self {
ZipError::Io(err) => write!(fmt, "{err}"),
ZipError::InvalidArchive(err) => write!(fmt, "invalid Zip archive: {err}"),
ZipError::UnsupportedArchive(err) => write!(fmt, "unsupported Zip archive: {err}"),
ZipError::FileNotFound => write!(fmt, "specified file not found in archive"),
ZipError::InvalidPassword => write!(fmt, "incorrect password for encrypted file"),
}
}
}

impl Error for ZipError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
ZipError::Io(err) => Some(err),
_ => None,
}
}
}

impl ZipError {
/// The text used as an error when a password is required and not supplied
///
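With `displaydoc` each variant's doc comment doubles as its `Display` format string, and `#[from]` replaces the hand-written `From<io::Error>` impl, which is why the manual `Display`, `Error`, and `From` blocks above could be deleted. A small check, written as a sketch rather than taken from the crate's tests:

    use zip::result::ZipError;

    fn show() {
        let err = ZipError::InvalidArchive("Invalid local file header");
        // Formatted from the `/// invalid Zip archive: {0}` doc comment.
        assert_eq!(err.to_string(), "invalid Zip archive: Invalid local file header");
    }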
src/spec.rs (92 lines changed)

@@ -1,5 +1,5 @@
use crate::result::{ZipError, ZipResult};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crate::unstable::{LittleEndianReadExt, LittleEndianWriteExt};
use std::borrow::Cow;
use std::io;
use std::io::prelude::*;
@@ -26,17 +26,17 @@ pub struct CentralDirectoryEnd {

impl CentralDirectoryEnd {
pub fn parse<T: Read>(reader: &mut T) -> ZipResult<CentralDirectoryEnd> {
let magic = reader.read_u32::<LittleEndian>()?;
let magic = reader.read_u32_le()?;
if magic != CENTRAL_DIRECTORY_END_SIGNATURE {
return Err(ZipError::InvalidArchive("Invalid digital signature header"));
}
let disk_number = reader.read_u16::<LittleEndian>()?;
let disk_with_central_directory = reader.read_u16::<LittleEndian>()?;
let number_of_files_on_this_disk = reader.read_u16::<LittleEndian>()?;
let number_of_files = reader.read_u16::<LittleEndian>()?;
let central_directory_size = reader.read_u32::<LittleEndian>()?;
let central_directory_offset = reader.read_u32::<LittleEndian>()?;
let zip_file_comment_length = reader.read_u16::<LittleEndian>()? as usize;
let disk_number = reader.read_u16_le()?;
let disk_with_central_directory = reader.read_u16_le()?;
let number_of_files_on_this_disk = reader.read_u16_le()?;
let number_of_files = reader.read_u16_le()?;
let central_directory_size = reader.read_u32_le()?;
let central_directory_offset = reader.read_u32_le()?;
let zip_file_comment_length = reader.read_u16_le()? as usize;
let mut zip_file_comment = vec![0; zip_file_comment_length];
reader.read_exact(&mut zip_file_comment)?;

@@ -65,7 +65,7 @@ impl CentralDirectoryEnd {
let mut pos = file_length - HEADER_SIZE;
while pos >= search_upper_bound {
reader.seek(io::SeekFrom::Start(pos))?;
if reader.read_u32::<LittleEndian>()? == CENTRAL_DIRECTORY_END_SIGNATURE {
if reader.read_u32_le()? == CENTRAL_DIRECTORY_END_SIGNATURE {
reader.seek(io::SeekFrom::Current(
BYTES_BETWEEN_MAGIC_AND_COMMENT_SIZE as i64,
))?;
@@ -85,14 +85,14 @@ impl CentralDirectoryEnd {
}

pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
writer.write_u32::<LittleEndian>(CENTRAL_DIRECTORY_END_SIGNATURE)?;
writer.write_u16::<LittleEndian>(self.disk_number)?;
writer.write_u16::<LittleEndian>(self.disk_with_central_directory)?;
writer.write_u16::<LittleEndian>(self.number_of_files_on_this_disk)?;
writer.write_u16::<LittleEndian>(self.number_of_files)?;
writer.write_u32::<LittleEndian>(self.central_directory_size)?;
writer.write_u32::<LittleEndian>(self.central_directory_offset)?;
writer.write_u16::<LittleEndian>(self.zip_file_comment.len() as u16)?;
writer.write_u32_le(CENTRAL_DIRECTORY_END_SIGNATURE)?;
writer.write_u16_le(self.disk_number)?;
writer.write_u16_le(self.disk_with_central_directory)?;
writer.write_u16_le(self.number_of_files_on_this_disk)?;
writer.write_u16_le(self.number_of_files)?;
writer.write_u32_le(self.central_directory_size)?;
writer.write_u32_le(self.central_directory_offset)?;
writer.write_u16_le(self.zip_file_comment.len() as u16)?;
writer.write_all(&self.zip_file_comment)?;
Ok(())
}
@@ -106,15 +106,15 @@ pub struct Zip64CentralDirectoryEndLocator {

impl Zip64CentralDirectoryEndLocator {
pub fn parse<T: Read>(reader: &mut T) -> ZipResult<Zip64CentralDirectoryEndLocator> {
let magic = reader.read_u32::<LittleEndian>()?;
let magic = reader.read_u32_le()?;
if magic != ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE {
return Err(ZipError::InvalidArchive(
"Invalid zip64 locator digital signature header",
));
}
let disk_with_central_directory = reader.read_u32::<LittleEndian>()?;
let end_of_central_directory_offset = reader.read_u64::<LittleEndian>()?;
let number_of_disks = reader.read_u32::<LittleEndian>()?;
let disk_with_central_directory = reader.read_u32_le()?;
let end_of_central_directory_offset = reader.read_u64_le()?;
let number_of_disks = reader.read_u32_le()?;

Ok(Zip64CentralDirectoryEndLocator {
disk_with_central_directory,
@@ -124,10 +124,10 @@ impl Zip64CentralDirectoryEndLocator {
}

pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
writer.write_u32::<LittleEndian>(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?;
writer.write_u32::<LittleEndian>(self.disk_with_central_directory)?;
writer.write_u64::<LittleEndian>(self.end_of_central_directory_offset)?;
writer.write_u32::<LittleEndian>(self.number_of_disks)?;
writer.write_u32_le(ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE)?;
writer.write_u32_le(self.disk_with_central_directory)?;
writer.write_u64_le(self.end_of_central_directory_offset)?;
writer.write_u32_le(self.number_of_disks)?;
Ok(())
}
}
@@ -156,20 +156,20 @@ impl Zip64CentralDirectoryEnd {
while pos >= nominal_offset {
reader.seek(io::SeekFrom::Start(pos))?;

if reader.read_u32::<LittleEndian>()? == ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE {
if reader.read_u32_le()? == ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE {
let archive_offset = pos - nominal_offset;

let _record_size = reader.read_u64::<LittleEndian>()?;
let _record_size = reader.read_u64_le()?;
// We would use this value if we did anything with the "zip64 extensible data sector".

let version_made_by = reader.read_u16::<LittleEndian>()?;
let version_needed_to_extract = reader.read_u16::<LittleEndian>()?;
let disk_number = reader.read_u32::<LittleEndian>()?;
let disk_with_central_directory = reader.read_u32::<LittleEndian>()?;
let number_of_files_on_this_disk = reader.read_u64::<LittleEndian>()?;
let number_of_files = reader.read_u64::<LittleEndian>()?;
let central_directory_size = reader.read_u64::<LittleEndian>()?;
let central_directory_offset = reader.read_u64::<LittleEndian>()?;
let version_made_by = reader.read_u16_le()?;
let version_needed_to_extract = reader.read_u16_le()?;
let disk_number = reader.read_u32_le()?;
let disk_with_central_directory = reader.read_u32_le()?;
let number_of_files_on_this_disk = reader.read_u64_le()?;
let number_of_files = reader.read_u64_le()?;
let central_directory_size = reader.read_u64_le()?;
let central_directory_offset = reader.read_u64_le()?;

results.push((
Zip64CentralDirectoryEnd {
@@ -201,16 +201,16 @@ impl Zip64CentralDirectoryEnd {
}

pub fn write<T: Write>(&self, writer: &mut T) -> ZipResult<()> {
writer.write_u32::<LittleEndian>(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?;
writer.write_u64::<LittleEndian>(44)?; // record size
writer.write_u16::<LittleEndian>(self.version_made_by)?;
writer.write_u16::<LittleEndian>(self.version_needed_to_extract)?;
writer.write_u32::<LittleEndian>(self.disk_number)?;
writer.write_u32::<LittleEndian>(self.disk_with_central_directory)?;
writer.write_u64::<LittleEndian>(self.number_of_files_on_this_disk)?;
writer.write_u64::<LittleEndian>(self.number_of_files)?;
writer.write_u64::<LittleEndian>(self.central_directory_size)?;
writer.write_u64::<LittleEndian>(self.central_directory_offset)?;
writer.write_u32_le(ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE)?;
writer.write_u64_le(44)?; // record size
writer.write_u16_le(self.version_made_by)?;
writer.write_u16_le(self.version_needed_to_extract)?;
writer.write_u32_le(self.disk_number)?;
writer.write_u32_le(self.disk_with_central_directory)?;
writer.write_u64_le(self.number_of_files_on_this_disk)?;
writer.write_u64_le(self.number_of_files)?;
writer.write_u64_le(self.central_directory_size)?;
writer.write_u64_le(self.central_directory_offset)?;
Ok(())
}
}
src/types.rs (34 lines changed)

@@ -1,4 +1,5 @@
//! Types that specify what is contained in a ZIP.
use num_enum::{FromPrimitive, IntoPrimitive};
use path::{Component, Path, PathBuf};
use std::path;
use std::sync::{Arc, OnceLock};
@@ -45,29 +46,20 @@ mod atomic {
}
}

use crate::extra_fields::ExtraField;
use crate::result::DateTimeRangeError;
#[cfg(feature = "time")]
use time::{error::ComponentRange, Date, Month, OffsetDateTime, PrimitiveDateTime, Time};

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum System {
Dos = 0,
Unix = 3,
#[num_enum(default)]
Unknown,
}

impl System {
pub const fn from_u8(system: u8) -> System {
use self::System::*;

match system {
0 => Dos,
3 => Unix,
_ => Unknown,
}
}
}

/// Representation of a moment in time.
///
/// Zip files use an old format from DOS to store timestamps,
@@ -371,6 +363,9 @@ pub struct ZipFileData {
pub large_file: bool,
/// AES mode if applicable
pub aes_mode: Option<(AesMode, AesVendorVersion)>,

/// extra fields, see <https://libzip.org/specifications/extrafld.txt>
pub extra_fields: Vec<ExtraField>,
}

impl ZipFileData {
@@ -512,10 +507,14 @@ mod test {
#[test]
fn system() {
use super::System;
assert_eq!(System::Dos as u16, 0u16);
assert_eq!(System::Unix as u16, 3u16);
assert_eq!(System::from_u8(0), System::Dos);
assert_eq!(System::from_u8(3), System::Unix);
assert_eq!(u8::from(System::Dos), 0u8);
assert_eq!(System::Dos as u8, 0u8);
assert_eq!(System::Unix as u8, 3u8);
assert_eq!(u8::from(System::Unix), 3u8);
assert_eq!(System::from(0), System::Dos);
assert_eq!(System::from(3), System::Unix);
assert_eq!(u8::from(System::Unknown), 4u8);
assert_eq!(System::Unknown as u8, 4u8);
}

#[test]
@@ -544,6 +543,7 @@ mod test {
external_attributes: 0,
large_file: false,
aes_mode: None,
extra_fields: Vec::new(),
};
assert_eq!(data.file_name_sanitized(), PathBuf::from("path/etc/passwd"));
}
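The derives also make the `System` conversion total: `#[num_enum(default)]` folds every byte that is not an explicit discriminant into `Unknown`, which the old `from_u8` match did by hand. A self-contained sketch mirroring the enum from this diff (the type itself is crate-private, so it is re-declared here purely for illustration):

    use num_enum::{FromPrimitive, IntoPrimitive};

    #[derive(Clone, Copy, Debug, PartialEq, Eq, FromPrimitive, IntoPrimitive)]
    #[repr(u8)]
    enum System {
        Dos = 0,
        Unix = 3,
        #[num_enum(default)]
        Unknown,
    }

    fn main() {
        assert_eq!(System::from(3u8), System::Unix);
        // Any unmapped value falls back to the default variant.
        assert_eq!(System::from(200u8), System::Unknown);
        assert_eq!(u8::from(System::Unknown), 4); // implicit discriminant after Unix = 3
    }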
@@ -1,3 +1,8 @@
#![allow(missing_docs)]

use std::io;
use std::io::{Read, Write};

/// Provides high level API for reading from a stream.
pub mod stream {
pub use crate::read::stream::*;
@@ -18,3 +23,47 @@ pub mod write {
}
}
}

/// Helper methods for writing unsigned integers in little-endian form.
pub trait LittleEndianWriteExt: Write {
fn write_u16_le(&mut self, input: u16) -> io::Result<()> {
self.write_all(&input.to_le_bytes())
}

fn write_u32_le(&mut self, input: u32) -> io::Result<()> {
self.write_all(&input.to_le_bytes())
}

fn write_u64_le(&mut self, input: u64) -> io::Result<()> {
self.write_all(&input.to_le_bytes())
}

fn write_u128_le(&mut self, input: u128) -> io::Result<()> {
self.write_all(&input.to_le_bytes())
}
}

impl<W: Write> LittleEndianWriteExt for W {}

/// Helper methods for reading unsigned integers in little-endian form.
pub trait LittleEndianReadExt: Read {
fn read_u16_le(&mut self) -> io::Result<u16> {
let mut out = [0u8; 2];
self.read_exact(&mut out)?;
Ok(u16::from_le_bytes(out))
}

fn read_u32_le(&mut self) -> io::Result<u32> {
let mut out = [0u8; 4];
self.read_exact(&mut out)?;
Ok(u32::from_le_bytes(out))
}

fn read_u64_le(&mut self) -> io::Result<u64> {
let mut out = [0u8; 8];
self.read_exact(&mut out)?;
Ok(u64::from_le_bytes(out))
}
}

impl<R: Read> LittleEndianReadExt for R {}
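These two traits are what every `read_uNN::<LittleEndian>` / `write_uNN::<LittleEndian>` call in this diff migrates to; their blanket impls attach the methods to any `Read` or `Write` type. A tiny round-trip sketch, assuming the traits remain reachable under `zip::unstable`:

    use std::io::Cursor;
    use zip::unstable::{LittleEndianReadExt, LittleEndianWriteExt};

    fn round_trip() -> std::io::Result<()> {
        let mut buf = Cursor::new(Vec::new());
        // The local file header magic, written the same way spec.rs now writes it.
        buf.write_u32_le(0x04034b50)?;
        buf.set_position(0);
        assert_eq!(buf.read_u32_le()?, 0x04034b50);
        Ok(())
    }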
245
src/write.rs
245
src/write.rs
|
@ -5,11 +5,10 @@ use crate::read::{find_content, central_header_to_zip_file, ZipArchive, ZipFile}
|
|||
use crate::result::{ZipError, ZipResult};
|
||||
use crate::spec;
|
||||
use crate::types::{ffi, DateTime, System, ZipFileData, DEFAULT_VERSION};
|
||||
use byteorder::{LittleEndian, WriteBytesExt};
|
||||
#[cfg(any(feature = "_deflate-any", feature = "bzip2", feature = "zstd",))]
|
||||
use core::num::NonZeroU64;
|
||||
use crc32fast::Hasher;
|
||||
use std::collections::HashMap;
|
||||
use indexmap::IndexMap;
|
||||
use std::convert::TryInto;
|
||||
use std::default::Default;
|
||||
use std::io;
|
||||
|
@ -112,8 +111,7 @@ pub(crate) mod zip_writer {
|
|||
/// ```
|
||||
pub struct ZipWriter<W: Write + Seek> {
|
||||
pub(super) inner: GenericZipWriter<W>,
|
||||
pub(super) files: Vec<ZipFileData>,
|
||||
pub(super) files_by_name: HashMap<Box<str>, usize>,
|
||||
pub(super) files: IndexMap<Box<str>, ZipFileData>,
|
||||
pub(super) stats: ZipWriterStats,
|
||||
pub(super) writing_to_file: bool,
|
||||
pub(super) writing_raw: bool,
|
||||
|
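Replacing the old `Vec<ZipFileData>` plus `files_by_name: HashMap<Box<str>, usize>` pair with a single `IndexMap<Box<str>, ZipFileData>` keeps entries in insertion order (which the central-directory writer depends on) while still allowing lookup by name, and it removes the need to keep two collections in sync. A standalone sketch of the operations the writer needs, using the `indexmap` crate directly with a placeholder value type in place of `ZipFileData`:

use indexmap::IndexMap;

fn main() {
    // Keys stand in for file names; the u64 values stand in for ZipFileData.
    let mut files: IndexMap<Box<str>, u64> = IndexMap::new();
    files.insert("a.txt".into(), 10);
    files.insert("b.txt".into(), 20);

    // Name lookup returns the positional index, as index_by_name() needs.
    assert_eq!(files.get_index_of("b.txt"), Some(1));

    // Iteration preserves insertion order, which write_central_and_footer() relies on.
    let order: Vec<&str> = files.keys().map(|k| &**k).collect();
    assert_eq!(order, ["a.txt", "b.txt"]);

    // pop() removes the most recently inserted entry, as abort_file() does.
    let (last_name, last_value) = files.pop().unwrap();
    assert_eq!(&*last_name, "b.txt");
    assert_eq!(last_value, 20);
}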
@@ -127,6 +125,7 @@ use crate::result::ZipError::InvalidArchive;
#[cfg(feature = "lzma")]
use crate::result::ZipError::UnsupportedArchive;
use crate::spec::path_to_string;
use crate::unstable::LittleEndianWriteExt;
use crate::write::GenericZipWriter::{Closed, Storer};
use crate::zipcrypto::ZipCryptoKeys;
use crate::CompressionMethod::Stored;

@@ -379,8 +378,8 @@ impl FileOptions<ExtendedFileOptions> {
            }
        };
        vec.reserve_exact(data.len() + 4);
        vec.write_u16::<LittleEndian>(header_id)?;
        vec.write_u16::<LittleEndian>(data.len() as u16)?;
        vec.write_u16_le(header_id)?;
        vec.write_u16_le(data.len() as u16)?;
        vec.write_all(data)?;
        Ok(())
    }

@@ -436,7 +435,7 @@ impl<W: Write + Seek> Write for ZipWriter<W> {
        if let Ok(count) = write_result {
            self.stats.update(&buf[0..count]);
            if self.stats.bytes_written > spec::ZIP64_BYTES_THR
                && !self.files.last_mut().unwrap().large_file
                && !self.files.last_mut().unwrap().1.large_file
            {
                self.abort_file().unwrap();
                return Err(io::Error::new(

@@ -480,8 +479,7 @@ impl<A: Read + Write + Seek> ZipWriter<A> {
        Ok(ZipWriter {
            inner: Storer(MaybeEncrypted::Unencrypted(readwriter)),
            files: metadata.files.into(),
            files_by_name: metadata.names_map,
            files: metadata.files,
            stats: Default::default(),
            writing_to_file: false,
            comment: footer.zip_file_comment,

@@ -598,6 +596,39 @@ impl<A: Read + Write + Seek> ZipWriter<A> {
    ) -> ZipResult<()> {
        self.deep_copy_file(&path_to_string(src_path), &path_to_string(dest_path))
    }

    /// Write the zip file into the backing stream, then produce a readable archive of that data.
    ///
    /// This method avoids parsing the central directory records at the end of the stream for
    /// a slight performance improvement over running [`ZipArchive::new()`] on the output of
    /// [`Self::finish()`].
    ///
    ///```
    /// # fn main() -> Result<(), zip::result::ZipError> {
    /// use std::io::{Cursor, prelude::*};
    /// use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// let options = SimpleFileOptions::default();
    /// zip.start_file("a.txt", options)?;
    /// zip.write_all(b"hello\n")?;
    ///
    /// let mut zip = zip.finish_into_readable()?;
    /// let mut s: String = String::new();
    /// zip.by_name("a.txt")?.read_to_string(&mut s)?;
    /// assert_eq!(s, "hello\n");
    /// # Ok(())
    /// # }
    ///```
    pub fn finish_into_readable(mut self) -> ZipResult<ZipArchive<A>> {
        let central_start = self.finalize()?;
        let inner = mem::replace(&mut self.inner, Closed).unwrap();
        let comment = mem::take(&mut self.comment);
        let files = mem::take(&mut self.files);
        let archive = ZipArchive::from_finalized_writer(files, comment, inner, central_start)?;
        Ok(archive)
    }
}

impl<W: Write + Seek> ZipWriter<W> {

@@ -609,8 +640,7 @@ impl<W: Write + Seek> ZipWriter<W> {
    pub fn new(inner: W) -> ZipWriter<W> {
        ZipWriter {
            inner: Storer(MaybeEncrypted::Unencrypted(inner)),
            files: Vec::new(),
            files_by_name: HashMap::new(),
            files: IndexMap::new(),
            stats: Default::default(),
            writing_to_file: false,
            writing_raw: false,
@@ -697,38 +727,39 @@ impl<W: Write + Seek> ZipWriter<W> {
            external_attributes: permissions << 16,
            large_file: options.large_file,
            aes_mode: None,
            extra_fields: Vec::new(),
        };
        let index = self.insert_file_data(file)?;
        let file = &mut self.files[index];
        let writer = self.inner.get_plain();
        writer.write_u32::<LittleEndian>(spec::LOCAL_FILE_HEADER_SIGNATURE)?;
        writer.write_u32_le(spec::LOCAL_FILE_HEADER_SIGNATURE)?;
        // version needed to extract
        writer.write_u16::<LittleEndian>(file.version_needed())?;
        writer.write_u16_le(file.version_needed())?;
        // general purpose bit flag
        let flag = if !file.file_name.is_ascii() {
            1u16 << 11
        } else {
            0
        } | if file.encrypted { 1u16 << 0 } else { 0 };
        writer.write_u16::<LittleEndian>(flag)?;
        writer.write_u16_le(flag)?;
        // Compression method
        #[allow(deprecated)]
        writer.write_u16::<LittleEndian>(file.compression_method.to_u16())?;
        writer.write_u16_le(file.compression_method.to_u16())?;
        // last mod file time and last mod file date
        writer.write_u16::<LittleEndian>(file.last_modified_time.timepart())?;
        writer.write_u16::<LittleEndian>(file.last_modified_time.datepart())?;
        writer.write_u16_le(file.last_modified_time.timepart())?;
        writer.write_u16_le(file.last_modified_time.datepart())?;
        // crc-32
        writer.write_u32::<LittleEndian>(file.crc32)?;
        writer.write_u32_le(file.crc32)?;
        // compressed size and uncompressed size
        if file.large_file {
            writer.write_u32::<LittleEndian>(spec::ZIP64_BYTES_THR as u32)?;
            writer.write_u32::<LittleEndian>(spec::ZIP64_BYTES_THR as u32)?;
            writer.write_u32_le(spec::ZIP64_BYTES_THR as u32)?;
            writer.write_u32_le(spec::ZIP64_BYTES_THR as u32)?;
        } else {
            writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
            writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
            writer.write_u32_le(file.compressed_size as u32)?;
            writer.write_u32_le(file.uncompressed_size as u32)?;
        }
        // file name length
        writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
        writer.write_u16_le(file.file_name.as_bytes().len() as u16)?;
        // extra field length
        let mut extra_field_length = file.extra_field_len();
        if file.large_file {

@@ -739,7 +770,7 @@ impl<W: Write + Seek> ZipWriter<W> {
            return Err(InvalidArchive("Extra data field is too large"));
        }
        let extra_field_length = extra_field_length as u16;
        writer.write_u16::<LittleEndian>(extra_field_length)?;
        writer.write_u16_le(extra_field_length)?;
        // file name
        writer.write_all(file.file_name.as_bytes())?;
        // zip64 extra field

@@ -768,7 +799,7 @@ impl<W: Write + Seek> ZipWriter<W> {
            let pad_body = vec![0; pad_length - 4];
            writer.write_all(b"za").map_err(ZipError::from)?; // 0x617a
            writer
                .write_u16::<LittleEndian>(pad_body.len() as u16)
                .write_u16_le(pad_body.len() as u16)
                .map_err(ZipError::from)?;
            writer.write_all(&pad_body).map_err(ZipError::from)?;
        } else {

@@ -781,7 +812,7 @@ impl<W: Write + Seek> ZipWriter<W> {
            // Update extra field length in local file header.
            writer.seek(SeekFrom::Start(file.header_start + 28))?;
            writer.write_u16::<LittleEndian>(new_extra_field_length)?;
            writer.write_u16_le(new_extra_field_length)?;
            writer.seek(SeekFrom::Start(header_end))?;
            debug_assert_eq!(header_end % align, 0);
        }

@@ -809,15 +840,12 @@ impl<W: Write + Seek> ZipWriter<W> {
    }

    fn insert_file_data(&mut self, file: ZipFileData) -> ZipResult<usize> {
        let name = &file.file_name;
        if self.files_by_name.contains_key(name) {
        if self.files.contains_key(&file.file_name) {
            return Err(InvalidArchive("Duplicate filename"));
        }
        let name = name.to_owned();
        self.files.push(file);
        let index = self.files.len() - 1;
        self.files_by_name.insert(name, index);
        Ok(index)
        let name = file.file_name.to_owned();
        self.files.insert(name.clone(), file);
        Ok(self.files.get_index_of(&name).unwrap())
    }

    fn finish_file(&mut self) -> ZipResult<()> {
@@ -838,7 +866,7 @@ impl<W: Write + Seek> ZipWriter<W> {
        if !self.writing_raw {
            let file = match self.files.last_mut() {
                None => return Ok(()),
                Some(f) => f,
                Some((_, f)) => f,
            };
            file.crc32 = self.stats.hasher.clone().finalize();
            file.uncompressed_size = self.stats.bytes_written;

@@ -878,8 +906,7 @@ impl<W: Write + Seek> ZipWriter<W> {
    /// Removes the file currently being written from the archive if there is one, or else removes
    /// the file most recently written.
    pub fn abort_file(&mut self) -> ZipResult<()> {
        let last_file = self.files.pop().ok_or(ZipError::FileNotFound)?;
        self.files_by_name.remove(&last_file.file_name);
        let (_, last_file) = self.files.pop().ok_or(ZipError::FileNotFound)?;
        let make_plain_writer = self.inner.prepare_next_writer(
            Stored,
            None,

@@ -892,7 +919,7 @@ impl<W: Write + Seek> ZipWriter<W> {
        // overwrite a valid file and corrupt the archive
        let rewind_safe: bool = match last_file.data_start.get() {
            None => self.files.is_empty(),
            Some(last_file_start) => self.files.iter().all(|file| {
            Some(last_file_start) => self.files.values().all(|file| {
                file.data_start
                    .get()
                    .is_some_and(|start| start < last_file_start)

@@ -935,6 +962,68 @@ impl<W: Write + Seek> ZipWriter<W> {
        Ok(())
    }

    /* TODO: link to/use Self::finish_into_readable() from https://github.com/zip-rs/zip/pull/400 in
     * this docstring. */
    /// Copy over the entire contents of another archive verbatim.
    ///
    /// This method extracts file metadata from the `source` archive, then simply performs a single
    /// big [`io::copy()`](io::copy) to transfer all the actual file contents without any
    /// decompression or decryption. This is more performant than the equivalent operation of
    /// calling [`Self::raw_copy_file()`] for each entry from the `source` archive in sequence.
    ///
    ///```
    /// # fn main() -> Result<(), zip::result::ZipError> {
    /// use std::io::{Cursor, prelude::*};
    /// use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// zip.start_file("a.txt", SimpleFileOptions::default())?;
    /// zip.write_all(b"hello\n")?;
    /// let src = ZipArchive::new(zip.finish()?)?;
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// zip.start_file("b.txt", SimpleFileOptions::default())?;
    /// zip.write_all(b"hey\n")?;
    /// let src2 = ZipArchive::new(zip.finish()?)?;
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// zip.merge_archive(src)?;
    /// zip.merge_archive(src2)?;
    /// let mut result = ZipArchive::new(zip.finish()?)?;
    ///
    /// let mut s: String = String::new();
    /// result.by_name("a.txt")?.read_to_string(&mut s)?;
    /// assert_eq!(s, "hello\n");
    /// s.clear();
    /// result.by_name("b.txt")?.read_to_string(&mut s)?;
    /// assert_eq!(s, "hey\n");
    /// # Ok(())
    /// # }
    ///```
    pub fn merge_archive<R>(&mut self, mut source: ZipArchive<R>) -> ZipResult<()>
    where
        R: Read + io::Seek,
    {
        self.finish_file()?;

        /* Ensure we accept the file contents on faith (and avoid overwriting the data).
         * See raw_copy_file_rename(). */
        self.writing_to_file = true;
        self.writing_raw = true;

        let writer = self.inner.get_plain();
        /* Get the file entries from the source archive. */
        let new_files = source.merge_contents(writer)?;

        /* These file entries are now ours! */
        self.files.extend(new_files);

        Ok(())
    }

    fn normalize_options<T: FileOptionExtension>(options: &mut FileOptions<T>) {
        if options.permissions.is_none() {
            options.permissions = Some(0o644);
@@ -1101,7 +1190,7 @@ impl<W: Write + Seek> ZipWriter<W> {
    /// This will return the writer, but one should normally not append any data to the end of the file.
    /// Note that the zipfile will also be finished on drop.
    pub fn finish(&mut self) -> ZipResult<W> {
        self.finalize()?;
        let _central_start = self.finalize()?;
        let inner = mem::replace(&mut self.inner, Closed);
        Ok(inner.unwrap())
    }

@@ -1161,10 +1250,10 @@ impl<W: Write + Seek> ZipWriter<W> {
        self.add_symlink(path_to_string(path), path_to_string(target), options)
    }

    fn finalize(&mut self) -> ZipResult<()> {
    fn finalize(&mut self) -> ZipResult<u64> {
        self.finish_file()?;

        {
        let central_start = {
            let central_start = self.write_central_and_footer()?;
            let writer = self.inner.get_plain();
            let footer_end = writer.stream_position()?;

@@ -1176,16 +1265,17 @@ impl<W: Write + Seek> ZipWriter<W> {
                writer.seek(SeekFrom::End(-(central_and_footer_size as i64)))?;
                self.write_central_and_footer()?;
            }
        }
            central_start
        };

        Ok(())
        Ok(central_start)
    }

    fn write_central_and_footer(&mut self) -> Result<u64, ZipError> {
        let writer = self.inner.get_plain();

        let central_start = writer.stream_position()?;
        for file in self.files.iter() {
        for file in self.files.values() {
            write_central_directory_header(writer, file)?;
        }
        let central_size = writer.stream_position()? - central_start;

@@ -1231,7 +1321,7 @@ impl<W: Write + Seek> ZipWriter<W> {
    }

    fn index_by_name(&self, name: &str) -> ZipResult<usize> {
        Ok(*self.files_by_name.get(name).ok_or(ZipError::FileNotFound)?)
        self.files.get_index_of(name).ok_or(ZipError::FileNotFound)
    }

    /// Adds another entry to the central directory referring to the same content as an existing

@@ -1419,7 +1509,10 @@ impl<W: Write + Seek> GenericZipWriter<W> {
            #[cfg(feature = "deflate-zopfli")]
            GenericZipWriter::ZopfliDeflater(w) => w.finish()?,
            #[cfg(feature = "deflate-zopfli")]
            GenericZipWriter::BufferedZopfliDeflater(w) => w.into_inner()?.finish()?,
            GenericZipWriter::BufferedZopfliDeflater(w) => w
                .into_inner()
                .map_err(|e| ZipError::Io(e.into_error()))?
                .finish()?,
            #[cfg(feature = "bzip2")]
            GenericZipWriter::Bzip2(w) => w.finish()?,
            #[cfg(feature = "zstd")]

@@ -1519,7 +1612,7 @@ fn clamp_opt<T: Ord + Copy, U: Ord + Copy + TryFrom<T>>(
fn update_local_file_header<T: Write + Seek>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    const CRC32_OFFSET: u64 = 14;
    writer.seek(SeekFrom::Start(file.header_start + CRC32_OFFSET))?;
    writer.write_u32::<LittleEndian>(file.crc32)?;
    writer.write_u32_le(file.crc32)?;
    if file.large_file {
        update_local_zip64_extra_field(writer, file)?;
    } else {

@@ -1530,9 +1623,9 @@ fn update_local_file_header<T: Write + Seek>(writer: &mut T, file: &ZipFileData)
                "Large file option has not been set",
            )));
        }
        writer.write_u32::<LittleEndian>(file.compressed_size as u32)?;
        writer.write_u32_le(file.compressed_size as u32)?;
        // uncompressed size is already checked on write to catch it as soon as possible
        writer.write_u32::<LittleEndian>(file.uncompressed_size as u32)?;
        writer.write_u32_le(file.uncompressed_size as u32)?;
    }
    Ok(())
}
@@ -1544,49 +1637,49 @@ fn write_central_directory_header<T: Write>(writer: &mut T, file: &ZipFileData)
    write_central_zip64_extra_field(&mut zip64_extra_field.as_mut(), file)?;

    // central file header signature
    writer.write_u32::<LittleEndian>(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?;
    writer.write_u32_le(spec::CENTRAL_DIRECTORY_HEADER_SIGNATURE)?;
    // version made by
    let version_made_by = (file.system as u16) << 8 | (file.version_made_by as u16);
    writer.write_u16::<LittleEndian>(version_made_by)?;
    writer.write_u16_le(version_made_by)?;
    // version needed to extract
    writer.write_u16::<LittleEndian>(file.version_needed())?;
    writer.write_u16_le(file.version_needed())?;
    // general purpose bit flag
    let flag = if !file.file_name.is_ascii() {
        1u16 << 11
    } else {
        0
    } | if file.encrypted { 1u16 << 0 } else { 0 };
    writer.write_u16::<LittleEndian>(flag)?;
    writer.write_u16_le(flag)?;
    // compression method
    #[allow(deprecated)]
    writer.write_u16::<LittleEndian>(file.compression_method.to_u16())?;
    writer.write_u16_le(file.compression_method.to_u16())?;
    // last mod file time + date
    writer.write_u16::<LittleEndian>(file.last_modified_time.timepart())?;
    writer.write_u16::<LittleEndian>(file.last_modified_time.datepart())?;
    writer.write_u16_le(file.last_modified_time.timepart())?;
    writer.write_u16_le(file.last_modified_time.datepart())?;
    // crc-32
    writer.write_u32::<LittleEndian>(file.crc32)?;
    writer.write_u32_le(file.crc32)?;
    // compressed size
    writer.write_u32::<LittleEndian>(file.compressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
    writer.write_u32_le(file.compressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
    // uncompressed size
    writer.write_u32::<LittleEndian>(file.uncompressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
    writer.write_u32_le(file.uncompressed_size.min(spec::ZIP64_BYTES_THR) as u32)?;
    // file name length
    writer.write_u16::<LittleEndian>(file.file_name.as_bytes().len() as u16)?;
    writer.write_u16_le(file.file_name.as_bytes().len() as u16)?;
    // extra field length
    writer.write_u16::<LittleEndian>(
    writer.write_u16_le(
        zip64_extra_field_length
            + file.extra_field_len() as u16
            + file.central_extra_field_len() as u16,
    )?;
    // file comment length
    writer.write_u16::<LittleEndian>(0)?;
    writer.write_u16_le(0)?;
    // disk number start
    writer.write_u16::<LittleEndian>(0)?;
    writer.write_u16_le(0)?;
    // internal file attributes
    writer.write_u16::<LittleEndian>(0)?;
    writer.write_u16_le(0)?;
    // external file attributes
    writer.write_u32::<LittleEndian>(file.external_attributes)?;
    writer.write_u32_le(file.external_attributes)?;
    // relative offset of local header
    writer.write_u32::<LittleEndian>(file.header_start.min(spec::ZIP64_BYTES_THR) as u32)?;
    writer.write_u32_le(file.header_start.min(spec::ZIP64_BYTES_THR) as u32)?;
    // file name
    writer.write_all(file.file_name.as_bytes())?;
    // zip64 extra field

@@ -1640,10 +1733,10 @@ fn validate_extra_data(header_id: u16, data: &[u8]) -> ZipResult<()> {
fn write_local_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    // This entry in the Local header MUST include BOTH original
    // and compressed file size fields.
    writer.write_u16::<LittleEndian>(0x0001)?;
    writer.write_u16::<LittleEndian>(16)?;
    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
    writer.write_u64::<LittleEndian>(file.compressed_size)?;
    writer.write_u16_le(0x0001)?;
    writer.write_u16_le(16)?;
    writer.write_u64_le(file.uncompressed_size)?;
    writer.write_u64_le(file.compressed_size)?;
    // Excluded fields:
    // u32: disk start number
    Ok(())

@@ -1655,8 +1748,8 @@ fn update_local_zip64_extra_field<T: Write + Seek>(
) -> ZipResult<()> {
    let zip64_extra_field = file.header_start + 30 + file.file_name.as_bytes().len() as u64;
    writer.seek(SeekFrom::Start(zip64_extra_field + 4))?;
    writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
    writer.write_u64::<LittleEndian>(file.compressed_size)?;
    writer.write_u64_le(file.uncompressed_size)?;
    writer.write_u64_le(file.compressed_size)?;
    // Excluded fields:
    // u32: disk start number
    Ok(())

@@ -1681,18 +1774,18 @@ fn write_central_zip64_extra_field<T: Write>(writer: &mut T, file: &ZipFileData)
        size += 8;
    }
    if size > 0 {
        writer.write_u16::<LittleEndian>(0x0001)?;
        writer.write_u16::<LittleEndian>(size)?;
        writer.write_u16_le(0x0001)?;
        writer.write_u16_le(size)?;
        size += 4;

        if uncompressed_size {
            writer.write_u64::<LittleEndian>(file.uncompressed_size)?;
            writer.write_u64_le(file.uncompressed_size)?;
        }
        if compressed_size {
            writer.write_u64::<LittleEndian>(file.compressed_size)?;
            writer.write_u64_le(file.compressed_size)?;
        }
        if header_start {
            writer.write_u64::<LittleEndian>(file.header_start)?;
            writer.write_u64_le(file.header_start)?;
        }
        // Excluded fields:
        // u32: disk start number
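Taken together, the two new methods cover the common append workflow: `merge_archive()` splices an existing archive in with one raw copy, and `finish_into_readable()` hands back a `ZipArchive` without re-parsing the central directory it just wrote. A hedged sketch combining them, modelled on the doc examples above rather than taken verbatim from the crate:

use std::io::{Cursor, Read, Write};
use zip::{write::SimpleFileOptions, ZipWriter};

fn main() -> Result<(), zip::result::ZipError> {
    // Build a small source archive entirely in memory.
    let mut src = ZipWriter::new(Cursor::new(Vec::new()));
    src.start_file("a.txt", SimpleFileOptions::default())?;
    src.write_all(b"hello\n")?;
    let src = src.finish_into_readable()?;

    // Start a destination archive and add an entry of our own...
    let mut dst = ZipWriter::new(Cursor::new(Vec::new()));
    dst.start_file("b.txt", SimpleFileOptions::default())?;
    dst.write_all(b"world\n")?;

    // ...then splice in every entry from `src` with a single raw copy.
    dst.merge_archive(src)?;

    // Reopen the result without re-reading the central directory from the stream.
    let mut merged = dst.finish_into_readable()?;
    let mut s = String::new();
    merged.by_name("a.txt")?.read_to_string(&mut s)?;
    assert_eq!(s, "hello\n");
    Ok(())
}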
BIN tests/data/extended_timestamp.zip Normal file
Binary file not shown.
@@ -1,8 +1,8 @@
use byteorder::{LittleEndian, WriteBytesExt};
use std::collections::HashSet;
use std::io::prelude::*;
use std::io::Cursor;
use zip::result::ZipResult;
use zip::unstable::LittleEndianWriteExt;
use zip::write::ExtendedFileOptions;
use zip::write::FileOptions;
use zip::write::SimpleFileOptions;

@@ -159,8 +159,8 @@ fn check_test_archive<R: Read + Seek>(zip_file: R) -> ZipResult<zip::ZipArchive<
    {
        let file_with_extra_data = archive.by_name("test_with_extra_data/🐢.txt")?;
        let mut extra_data = Vec::new();
        extra_data.write_u16::<LittleEndian>(0xbeef)?;
        extra_data.write_u16::<LittleEndian>(EXTRA_DATA.len() as u16)?;
        extra_data.write_u16_le(0xbeef)?;
        extra_data.write_u16_le(EXTRA_DATA.len() as u16)?;
        extra_data.write_all(EXTRA_DATA)?;
        assert_eq!(
            file_with_extra_data.extra_data(),
19 tests/zip_extended_timestamp.rs Normal file

@@ -0,0 +1,19 @@
use std::io;
use zip::ZipArchive;

#[test]
fn test_extended_timestamp() {
    let mut v = Vec::new();
    v.extend_from_slice(include_bytes!("../tests/data/extended_timestamp.zip"));
    let mut archive = ZipArchive::new(io::Cursor::new(v)).expect("couldn't open test zip file");

    for field in archive.by_name("test.txt").unwrap().extra_data_fields() {
        match field {
            zip::ExtraField::ExtendedTimestamp(ts) => {
                assert!(ts.ac_time().is_none());
                assert!(ts.cr_time().is_none());
                assert_eq!(*ts.mod_time().unwrap(), 1714635025);
            }
        }
    }
}