Replace AtomicU64 with OnceLock
parent b9142d8141
commit 9438bef0f7
7 changed files with 34 additions and 63 deletions
.github/workflows/ci.yaml (2 changes)

@@ -15,7 +15,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, macOS-latest, windows-latest]
-        rust: [stable, 1.67.0, nightly]
+        rust: [stable, 1.70, nightly]
 
     steps:
     - uses: actions/checkout@master

Cargo.toml

@@ -7,7 +7,7 @@ license = "MIT"
 repository = "https://github.com/Pr0methean/zip-next.git"
 keywords = ["zip", "archive"]
-rust-version = "1.67.0"
+rust-version = "1.70.0"
 description = """
 Library to support the reading and writing of zip files.
 """
 edition = "2021"

README.md

@@ -64,7 +64,7 @@ By default `aes-crypto`, `deflate`, `deflate-zlib-ng`, `deflate-zopfli`, `bzip2`
 MSRV
 ----
 
-Our current Minimum Supported Rust Version is **1.67.0**. When adding features,
+Our current Minimum Supported Rust Version is **1.70**. When adding features,
 we will follow these guidelines:
 
 - We will always support the latest four minor Rust versions. This gives you a 6
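The MSRV bump in CI, Cargo.toml, and the README lines up with `std::sync::OnceLock` being stabilized in Rust 1.70. A minimal sketch (not part of the diff) of the OnceLock behaviour the rest of the commit relies on:

```rust
use std::sync::OnceLock;

// OnceLock is a cell that can be written at most once and read many times,
// replacing the hand-rolled AtomicU64 wrapper previously used for the lazily
// computed `data_start` offset.
fn main() {
    let cell: OnceLock<u64> = OnceLock::new();
    assert!(cell.get().is_none());            // nothing stored yet
    let v = *cell.get_or_init(|| 42);         // first caller initializes
    assert_eq!(v, 42);
    assert_eq!(*cell.get_or_init(|| 7), 42);  // later callers see the first value
}
```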

src/read.rs (31 changes)
@@ -8,14 +8,14 @@ use crate::crc32::Crc32Reader;
 use crate::read::zip_archive::Shared;
 use crate::result::{ZipError, ZipResult};
 use crate::spec;
-use crate::types::{AesMode, AesVendorVersion, AtomicU64, DateTime, System, ZipFileData};
+use crate::types::{AesMode, AesVendorVersion, DateTime, System, ZipFileData};
 use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator};
 use byteorder::{LittleEndian, ReadBytesExt};
 use std::borrow::{Borrow, Cow};
 use std::collections::HashMap;
 use std::io::{self, prelude::*};
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};
 
 #[cfg(any(
     feature = "deflate",
@@ -227,14 +227,19 @@ pub(crate) fn find_content<'a>(
     if signature != spec::LOCAL_FILE_HEADER_SIGNATURE {
         return Err(ZipError::InvalidArchive("Invalid local file header"));
     }
 
-    reader.seek(io::SeekFrom::Current(22))?;
-    let file_name_length = reader.read_u16::<LittleEndian>()? as u64;
-    let extra_field_length = reader.read_u16::<LittleEndian>()? as u64;
-    let magic_and_header = 4 + 22 + 2 + 2;
-    let data_start = data.header_start + magic_and_header + file_name_length + extra_field_length;
-    data.data_start.store(data_start);
+    let data_start = match data.data_start.get() {
+        None => {
+            reader.seek(io::SeekFrom::Current(22))?;
+            let file_name_length = reader.read_u16::<LittleEndian>()? as u64;
+            let extra_field_length = reader.read_u16::<LittleEndian>()? as u64;
+            let magic_and_header = 4 + 22 + 2 + 2;
+            let data_start = data.header_start + magic_and_header + file_name_length + extra_field_length;
+            data.data_start.get_or_init(|| data_start);
+            data_start
+        }
+        Some(start) => *start
+    };
 
     reader.seek(io::SeekFrom::Start(data_start))?;
     Ok((reader as &mut dyn Read).take(data.compressed_size))
 }
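The rewritten `find_content` only parses the local header when `data_start` has not been cached yet; a previously stored offset is reused without any extra seeks. A simplified sketch of that pattern, with `parse_data_start` as a hypothetical stand-in for the seek/`read_u16` sequence above:

```rust
use std::io;
use std::sync::OnceLock;

// Only do the fallible, side-effecting header parse when the cache is empty.
fn cached_data_start(
    cache: &OnceLock<u64>,
    parse_data_start: impl FnOnce() -> io::Result<u64>,
) -> io::Result<u64> {
    let start = match cache.get() {
        Some(start) => *start,
        None => {
            let start = parse_data_start()?; // `?` cannot be used inside get_or_init's closure
            *cache.get_or_init(|| start)
        }
    };
    Ok(start)
}
```

The explicit `match` is kept rather than a bare `get_or_init` because the parsing step is fallible and performs seeks that should be skipped entirely when the offset is already known.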
@@ -827,7 +832,7 @@ fn central_header_to_zip_file_inner<R: Read>(
         file_comment,
         header_start: offset,
         central_header_start,
-        data_start: AtomicU64::new(0),
+        data_start: OnceLock::new(),
         external_attributes: external_file_attributes,
         large_file: false,
         aes_mode: None,
@@ -1068,7 +1073,7 @@ impl<'a> ZipFile<'a> {
 
     /// Get the starting offset of the data of the compressed file
     pub fn data_start(&self) -> u64 {
-        self.data.data_start.load()
+        *self.data.data_start.get().unwrap_or(&0)
     }
 
     /// Get the starting offset of the zip header for this file
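Readers now treat an empty cell as offset 0, matching the old `AtomicU64::new(0)` default. In isolation the accessor amounts to something like this sketch (not part of the diff):

```rust
use std::sync::OnceLock;

// An unset OnceLock has no stored value, so the getter falls back to 0 explicitly.
fn start_or_zero(data_start: &OnceLock<u64>) -> u64 {
    *data_start.get().unwrap_or(&0)
}
```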
@@ -1188,7 +1193,7 @@ pub fn read_zipfile_from_stream<'a, R: Read>(reader: &'a mut R) -> ZipResult<Opt
             // header_start and data start are not available, but also don't matter, since seeking is
             // not available.
             header_start: 0,
-            data_start: AtomicU64::new(0),
+            data_start: OnceLock::new(),
             central_header_start: 0,
             // The external_attributes field is only available in the central directory.
             // We set this to zero, which should be valid as the docs state 'If input came

src/read/stream.rs

@@ -200,7 +200,7 @@ impl ZipStreamFileMetadata {
 
     /// Get the starting offset of the data of the compressed file
     pub fn data_start(&self) -> u64 {
-        self.0.data_start.load()
+        *self.0.data_start.get().unwrap_or(&0)
     }
 
     /// Get unix mode for the file

src/types.rs (43 changes)
@@ -1,16 +1,10 @@
 //! Types that specify what is contained in a ZIP.
 use path::{Component, Path, PathBuf};
 use std::path;
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};
 
 #[cfg(feature = "chrono")]
 use chrono::{Datelike, NaiveDate, NaiveDateTime, NaiveTime, Timelike};
-#[cfg(not(any(
-    all(target_arch = "arm", target_pointer_width = "32"),
-    target_arch = "mips",
-    target_arch = "powerpc"
-)))]
-use std::sync::atomic;
 #[cfg(doc)]
 use {crate::read::ZipFile, crate::write::FileOptions};
 
@@ -330,37 +324,6 @@ impl TryFrom<OffsetDateTime> for DateTime {
 
 pub const DEFAULT_VERSION: u8 = 46;
 
-/// A type like `AtomicU64` except it implements `Clone` and has predefined
-/// ordering.
-///
-/// It uses `Relaxed` ordering because it is not used for synchronisation.
-#[derive(Debug)]
-pub struct AtomicU64(atomic::AtomicU64);
-
-impl AtomicU64 {
-    pub const fn new(v: u64) -> Self {
-        Self(atomic::AtomicU64::new(v))
-    }
-
-    pub fn load(&self) -> u64 {
-        self.0.load(atomic::Ordering::Relaxed)
-    }
-
-    pub fn store(&self, val: u64) {
-        self.0.store(val, atomic::Ordering::Relaxed)
-    }
-
-    pub fn get_mut(&mut self) -> &mut u64 {
-        self.0.get_mut()
-    }
-}
-
-impl Clone for AtomicU64 {
-    fn clone(&self) -> Self {
-        Self(atomic::AtomicU64::new(self.load()))
-    }
-}
-
 /// Structure representing a ZIP file.
 #[derive(Debug, Clone)]
 pub struct ZipFileData {
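The removed wrapper existed mainly to give the field a `Clone` impl and a fixed `Relaxed` ordering. `OnceLock<u64>` already clones when its contents do, so `ZipFileData` can keep `#[derive(Clone)]` unchanged. A small sketch under that assumption, with `FileRecord` as a hypothetical stand-in for `ZipFileData`:

```rust
use std::sync::OnceLock;

// Cloning copies the stored offset if one is present, or yields another empty cell.
#[derive(Debug, Clone)]
struct FileRecord {
    data_start: OnceLock<u64>,
}

fn main() {
    let a = FileRecord { data_start: OnceLock::new() };
    a.data_start.get_or_init(|| 128);
    let b = a.clone();
    assert_eq!(b.data_start.get(), Some(&128)); // the cached offset travels with the clone
}
```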
@@ -401,7 +364,7 @@ pub struct ZipFileData {
     /// Note that when this is not known, it is set to 0
     pub central_header_start: u64,
     /// Specifies where the compressed data of the file starts
-    pub data_start: AtomicU64,
+    pub data_start: OnceLock<u64>,
     /// External file attributes
     pub external_attributes: u32,
     /// Reserve local ZIP64 extra field
@@ -562,7 +525,7 @@ mod test {
             central_extra_field: Arc::new(vec![]),
             file_comment: String::with_capacity(0).into_boxed_str(),
             header_start: 0,
-            data_start: AtomicU64::new(0),
+            data_start: OnceLock::new(),
             central_header_start: 0,
             external_attributes: 0,
             large_file: false,

src/write.rs (15 changes)
@@ -4,7 +4,7 @@ use crate::compression::CompressionMethod;
 use crate::read::{find_content, ZipArchive, ZipFile, ZipFileReader};
 use crate::result::{ZipError, ZipResult};
 use crate::spec;
-use crate::types::{ffi, AtomicU64, DateTime, System, ZipFileData, DEFAULT_VERSION};
+use crate::types::{ffi, DateTime, System, ZipFileData, DEFAULT_VERSION};
 use byteorder::{LittleEndian, WriteBytesExt};
 #[cfg(any(
     feature = "deflate",
@@ -24,7 +24,7 @@ use std::io::prelude::*;
 use std::io::{BufReader, SeekFrom};
 use std::mem;
 use std::str::{from_utf8, Utf8Error};
-use std::sync::Arc;
+use std::sync::{Arc, OnceLock};
 
 #[cfg(any(
     feature = "deflate",
@@ -477,7 +477,7 @@ impl<A: Read + Write + Seek> ZipWriter<A> {
         let write_position = self.inner.get_plain().stream_position()?;
         let src_index = self.index_by_name(src_name)?;
         let src_data = &self.files[src_index];
-        let data_start = src_data.data_start.load();
+        let data_start = *src_data.data_start.get().unwrap_or(&0);
         let compressed_size = src_data.compressed_size;
         debug_assert!(compressed_size <= write_position - data_start);
         let uncompressed_size = src_data.uncompressed_size;
@@ -616,7 +616,7 @@ impl<W: Write + Seek> ZipWriter<W> {
             central_extra_field: options.central_extra_data,
             file_comment: String::with_capacity(0).into_boxed_str(),
             header_start,
-            data_start: AtomicU64::new(0),
+            data_start: OnceLock::new(),
             central_header_start: 0,
             external_attributes: permissions << 16,
             large_file: options.large_file,
@@ -710,7 +710,8 @@ impl<W: Write + Seek> ZipWriter<W> {
             self.inner = Storer(MaybeEncrypted::Encrypted(zipwriter));
         }
         self.stats.start = header_end;
-        *file.data_start.get_mut() = header_end;
+        debug_assert!(file.data_start.get().is_none());
+        file.data_start.get_or_init(|| header_end);
         self.writing_to_file = true;
         self.stats.bytes_written = 0;
         self.stats.hasher = Hasher::new();
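On the write side the offset is now published exactly once per entry, with a debug assertion guarding against double initialization. Roughly, as in this sketch (`record_data_start` is a hypothetical helper name, not part of the crate):

```rust
use std::sync::OnceLock;

// In debug builds, assert the offset has not been set for this entry yet,
// then publish it exactly once.
fn record_data_start(data_start: &OnceLock<u64>, header_end: u64) {
    debug_assert!(data_start.get().is_none());
    data_start.get_or_init(|| header_end);
}
```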
@@ -800,10 +801,12 @@ impl<W: Write + Seek> ZipWriter<W> {
         self.switch_to_non_encrypting_writer()?;
         // Make sure this is the last file, and that no shallow copies of it remain; otherwise we'd
         // overwrite a valid file and corrupt the archive
+        let last_file_start = last_file.data_start.get().unwrap();
         if self
             .files
             .iter()
-            .all(|file| file.data_start.load() < last_file.data_start.load())
+            .flat_map(|file| file.data_start.get())
+            .all(|start| start < last_file_start)
         {
             self.inner
                 .get_plain()
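Because `get()` returns an `Option`, the final safety check can `flat_map` over the cached offsets and simply skip entries whose offset was never recorded. A self-contained sketch of that check (`is_strictly_last` is a hypothetical helper, not part of the crate):

```rust
use std::sync::OnceLock;

// Keep only the offsets that were actually recorded and compare each against
// the start of the file being abandoned.
fn is_strictly_last(files: &[OnceLock<u64>], last_file_start: u64) -> bool {
    files
        .iter()
        .flat_map(|cell| cell.get())
        .all(|&start| start < last_file_start)
}
```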