Compare commits


No commits in common. "e05886aac542e9d27d7395551937596cf9c13008" and "cab9ad922ea83d73acc88e4d5460b9215af31e3f" have entirely different histories.

8 changed files with 89 additions and 147 deletions

View file

@@ -3,11 +3,11 @@
     "crate2nix": {
       "flake": false,
       "locked": {
-        "lastModified": 1656671959,
-        "narHash": "sha256-7GPPgq665fm2k/aXca9P25huieEq7lu4jAwItiZlYFs=",
+        "lastModified": 1650460722,
+        "narHash": "sha256-jk4SZ8iOnfJEceVULjyOAq4MrX9CfU5bCWMyZP9nJVA=",
         "owner": "kolloch",
         "repo": "crate2nix",
-        "rev": "91f333aca414ee346bc5bdea76fe9938f73a15f9",
+        "rev": "78258f27fc3121562a44eb02c652a5ec77cf8d02",
         "type": "github"
       },
       "original": {
@@ -18,11 +18,11 @@
     },
     "flake-utils": {
       "locked": {
-        "lastModified": 1656065134,
-        "narHash": "sha256-oc6E6ByIw3oJaIyc67maaFcnjYOz1mMcOtHxbEf9NwQ=",
+        "lastModified": 1637014545,
+        "narHash": "sha256-26IZAc5yzlD9FlDT54io1oqG/bBoyka+FJk5guaX4x4=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "bee6a7250dd1b01844a2de7e02e4df7d8a0a206c",
+        "rev": "bba5dcc8e0b20ab664967ad83d24d64cb64ec4f4",
         "type": "github"
       },
       "original": {
@@ -33,11 +33,11 @@
     },
     "nixpkgs": {
      "locked": {
-        "lastModified": 1656753965,
-        "narHash": "sha256-BCrB3l0qpJokOnIVc3g2lHiGhnjUi0MoXiw6t1o8H1E=",
+        "lastModified": 1655306633,
+        "narHash": "sha256-nv4FfWWV/dEelByjXJtJkoDPOHIsKfLq50RN3Hqq5Yk=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "0ea7a8f1b939d74e5df8af9a8f7342097cdf69eb",
+        "rev": "b1957596ff1c7aa8c55c4512b7ad1c9672502e8e",
         "type": "github"
       },
       "original": {
@@ -49,11 +49,11 @@
     },
     "nixpkgs_2": {
       "locked": {
-        "lastModified": 1656401090,
-        "narHash": "sha256-bUS2nfQsvTQW2z8SK7oEFSElbmoBahOPtbXPm0AL3I4=",
+        "lastModified": 1637453606,
+        "narHash": "sha256-Gy6cwUswft9xqsjWxFYEnx/63/qzaFUwatcbV5GF/GQ=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "16de63fcc54e88b9a106a603038dd5dd2feb21eb",
+        "rev": "8afc4e543663ca0a6a4f496262cd05233737e732",
         "type": "github"
       },
       "original": {
@@ -77,11 +77,11 @@
         "nixpkgs": "nixpkgs_2"
       },
       "locked": {
-        "lastModified": 1656816597,
-        "narHash": "sha256-Y3f7wOZIvgr5IGW0u3d8stemjQPPRs4n93DjKJbrvXs=",
+        "lastModified": 1655347556,
+        "narHash": "sha256-JZ06EaeHi9sbbO3n8qYZ8KzDfSbDlPVRHI6Pw4sAxRE=",
         "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "bbba5e73a21c8c67d5fe1d4d8b3fde60ab6946cd",
+        "rev": "1cc3dc5aec863c2f724a46f7086fb011004a4e6e",
         "type": "github"
       },
       "original": {
@@ -92,11 +92,11 @@
     },
     "utils": {
       "locked": {
-        "lastModified": 1656065134,
-        "narHash": "sha256-oc6E6ByIw3oJaIyc67maaFcnjYOz1mMcOtHxbEf9NwQ=",
+        "lastModified": 1653893745,
+        "narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "bee6a7250dd1b01844a2de7e02e4df7d8a0a206c",
+        "rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
         "type": "github"
       },
       "original": {

View file

@@ -20,7 +20,7 @@
         pkgs = import nixpkgs {
           inherit system;
           overlays = [
-            rust-overlay.overlays.default
+            rust-overlay.overlay
             (final: prev: {
               rustc = final.rust-bin.stable.latest.default;
               cargo = final.rust-bin.stable.latest.default;

View file

@@ -96,7 +96,7 @@ impl DownloadingFile {
     fn selected(&self) -> Option<&UploadedFile> {
         match self.selection {
             DownloadSelection::All => None,
-            DownloadSelection::One(n) => Some(self.info.contents.as_ref()?.files.get(n)?),
+            DownloadSelection::One(n) => Some(self.info.contents.as_ref()?.get(n)?),
         }
     }

View file

@@ -114,7 +114,7 @@ async fn handle_download(
     if let Some(selection) = query.download {
         if let download::DownloadSelection::One(n) = selection {
             if let Some(ref files) = info.contents {
-                if n >= files.files.len() {
+                if n >= files.len() {
                     return not_found(req, data, false);
                 }
             } else {
@@ -129,7 +129,7 @@
         }
         .into_response(&req))
     } else {
-        let offsets = info.contents.as_ref().map(zip::file_data_offsets);
+        let offsets = info.contents.as_deref().map(zip::file_data_offsets);
         Ok(DownloadPage {
             info: DownloadInfo {
                 file: info,
@@ -167,7 +167,7 @@ async fn download_info(
     };
     let storage_path = data.config.storage_dir.join(code);
-    let offsets = info.contents.as_ref().map(zip::file_data_offsets);
+    let offsets = info.contents.as_deref().map(zip::file_data_offsets);
     Ok(web::Json(DownloadInfo {
         file: info,
         code: code.clone(),
@@ -281,8 +281,7 @@ where
     <T as FromStr>::Err: Debug,
 {
     let val = std::env::var(var).unwrap_or_else(|_| panic!("{} must be set!", var));
-    val.parse::<T>()
-        .unwrap_or_else(|_| panic!("Invalid value {} for variable {}", val, var))
+    val.parse::<T>().unwrap_or_else(|_| panic!("Invalid value {} for variable {}", val, var))
 }
 
 #[actix_web::main]

View file

@@ -11,7 +11,6 @@ use rand::{
 };
 use serde::{Deserialize, Serialize};
 use serde_with::skip_serializing_none;
-use serde_with::{serde_as, PickFirst, FromInto};
 use time::OffsetDateTime;
 use tokio::{
     fs::File,
@@ -19,7 +18,6 @@ use tokio::{
 };
 
 use crate::upload::UploadedFile;
-use crate::zip::FileSet;
 
 const STATE_FILE_NAME: &str = "files.json";
 const MAX_STORAGE_FILES: usize = 1024;
@@ -38,7 +36,6 @@ pub fn is_valid_storage_code(s: &str) -> bool {
         .all(|c| c.is_ascii_alphanumeric() || c == &b'-')
 }
 
-#[serde_as]
 #[skip_serializing_none]
 #[derive(Clone, Deserialize, Serialize)]
 pub struct StoredFile {
@@ -48,9 +45,7 @@ pub struct StoredFile {
     pub modtime: OffsetDateTime,
     #[serde(with = "crate::timestamp")]
     pub expiry: OffsetDateTime,
-    #[serde_as(as = "Option<PickFirst<(_, FromInto<Vec<UploadedFile>>)>>")]
-    #[serde(default)]
-    pub contents: Option<FileSet>,
+    pub contents: Option<Vec<UploadedFile>>,
 }
 
 async fn is_valid_entry(key: &str, info: &StoredFile, storage_dir: &Path) -> bool {

View file

@@ -6,7 +6,6 @@ use actix_web::web;
 use actix_web_actors::ws::{self, CloseCode};
 use bytes::Bytes;
 use log::{debug, error, info, trace};
-use sanitise_file_name::{sanitise_with_options, Options as SanOptions};
 use serde::{Deserialize, Serialize};
 use time::OffsetDateTime;
 use unicode_normalization::UnicodeNormalization;
@@ -14,7 +13,6 @@ use unicode_normalization::UnicodeNormalization;
 
 use crate::{
     log_auth_failure,
     store::{self, FileAddError, StoredFile},
-    zip::FileSet,
     AppState,
 };
@@ -22,18 +20,8 @@ const MAX_FILES: usize = 256;
 const FILENAME_DATE_FORMAT: &[time::format_description::FormatItem] =
     time::macros::format_description!("[year]-[month]-[day]-[hour][minute][second]");
 
-/// Sanitises a filename after performing unicode normalization,
-/// optionally reducing the length limit to leave space for an
-/// extension yet to be added.
-fn sanitise(name: &str, extension_length: usize) -> String {
-    let name = name.nfd().collect::<String>();
-    sanitise_with_options(
-        &name,
-        &SanOptions {
-            length_limit: SanOptions::DEFAULT.length_limit - extension_length,
-            ..SanOptions::DEFAULT
-        },
-    )
+fn sanitise(name: &str) -> String {
+    sanitise_file_name::sanitise(&name.nfd().collect::<String>())
 }
 
 #[derive(thiserror::Error, Debug)]
@@ -120,7 +108,7 @@ pub struct UploadedFile {
 impl UploadedFile {
     fn new(name: &str, size: u64, modtime: OffsetDateTime) -> Self {
         Self {
-            name: sanitise(name, 0),
+            name: sanitise(name),
             size,
             modtime,
         }
@@ -268,7 +256,7 @@ impl Uploader {
         let mut file = raw_file.process();
         while filenames.contains(&file.name) {
             info!("Duplicate file name: {}", file.name);
-            if file.name.len() >= SanOptions::DEFAULT.length_limit {
+            if file.name.len() >= sanitise_file_name::Options::DEFAULT.length_limit {
                 return Err(Error::DuplicateFilename);
             }
             file.name.insert(0, '_');
@@ -290,28 +278,25 @@
             .write(true)
             .create_new(true)
             .open(&storage_path)?;
-        let (writer, name, size, modtime, contents): (Box<dyn Write>, _, _, _, _) = if files.len() > 1 {
+        let (writer, name, size, modtime): (Box<dyn Write>, _, _, _) = if files.len() > 1 {
             info!("Wrapping in zipfile generator");
             let now = OffsetDateTime::now_utc();
-            let collection_name =
-                collection_name.map(|f| sanitise(&f, 4)).unwrap_or_else(|| {
-                    super::APP_NAME.to_owned() + &now.format(FILENAME_DATE_FORMAT).unwrap()
-                });
-            let file_set = FileSet {
-                files,
-                directory_name: Some(collection_name.clone()),
-            };
-            let zip_writer =
-                super::zip::ZipGenerator::new(file_set.clone(), writer);
+            let zip_writer = super::zip::ZipGenerator::new(files.clone(), writer);
             let size = zip_writer.total_size();
-            (Box::new(zip_writer), collection_name + ".zip", size, now, Some(file_set))
+            let download_filename = collection_name
+                .map(|f| sanitise(&(f + ".zip")))
+                .unwrap_or_else(|| {
+                    super::APP_NAME.to_owned()
+                        + &now.format(FILENAME_DATE_FORMAT).unwrap()
+                        + ".zip"
+                });
+            (Box::new(zip_writer), download_filename, size, now)
         } else {
             (
                 Box::new(writer),
                 files[0].name.clone(),
                 files[0].size,
                 files[0].modtime,
-                None
             )
         };
         self.writer = Some(writer);
@@ -320,7 +305,7 @@
             size,
             modtime,
             expiry: OffsetDateTime::now_utc() + lifetime * time::Duration::DAY,
-            contents,
+            contents: if files.len() > 1 { Some(files) } else { None },
         };
         let state = self.app_state.clone();
         let storage_filename = self.storage_filename.clone();

View file

@@ -2,13 +2,10 @@ use std::io::Write;
 
 use crc32fast::Hasher;
 use log::debug;
-use serde::{Deserialize, Serialize};
 use time::OffsetDateTime;
 
 use crate::upload::UploadedFile;
 
-const SLASH: u8 = 0x2f;
-
 const SIGNATURE_SIZE: u64 = 4;
 const SHARED_FIELDS_SIZE: u64 = 26;
 const EXTRA_FIELD_SIZE: u64 = 41;
@@ -28,67 +25,45 @@ const EOCD_TOTAL_SIZE: u64 = EOCD64_RECORD_SIZE + EOCD64_LOCATOR_SIZE + EOCD_REC
 const EMPTY_STRING_CRC32: u32 = 0;
 
-#[derive(Clone, Deserialize, Serialize)]
-pub struct FileSet {
-    pub files: Vec<UploadedFile>,
-    // Optional for backwards compatibility only
-    pub directory_name: Option<String>,
+fn file_entry_size(file: &UploadedFile) -> u64 {
+    FILE_ENTRY_SIZE_MINUS_FILENAME_AND_FILE + file.name.len() as u64 + file.size
 }
 
-impl From<Vec<UploadedFile>> for FileSet {
-    fn from(files: Vec<UploadedFile>) -> Self {
-        Self { files, directory_name: None }
-    }
-}
-
-fn full_file_name_len(file: &UploadedFile, directory_name: &Option<String>) -> u64 {
-    file.name.len() as u64 + if let Some(d) = directory_name {
-        d.len() as u64 + 1
-    } else {
-        0
-    }
-}
-
-fn file_entry_size(file: &UploadedFile, directory_name: &Option<String>) -> u64 {
-    FILE_ENTRY_SIZE_MINUS_FILENAME_AND_FILE + full_file_name_len(file, directory_name) + file.size
-}
-
-fn file_entries_size(files: &FileSet, bound: Option<usize>) -> u64 {
+fn file_entries_size(files: &[UploadedFile]) -> u64 {
     let mut total = 0;
-    let fs = if let Some(n) = bound { &files.files[..n] } else { &files.files };
-    for file in fs.iter() {
-        total += file_entry_size(file, &files.directory_name)
+    for file in files.iter() {
+        total += file_entry_size(file)
     }
     total
 }
 
-pub fn file_data_offset(files: &FileSet, idx: usize) -> u64 {
-    file_entries_size(files, Some(idx))
+pub fn file_data_offset(files: &[UploadedFile], idx: usize) -> u64 {
+    file_entries_size(&files[..idx])
         + LOCAL_HEADER_SIZE_MINUS_FILENAME
-        + full_file_name_len(&files.files[idx], &files.directory_name)
+        + files[idx].name.len() as u64
 }
 
-pub fn file_data_offsets(files: &FileSet) -> Vec<u64> {
+pub fn file_data_offsets(files: &[UploadedFile]) -> Vec<u64> {
     let mut offsets = Vec::new();
     let mut offset: u64 = 0;
-    for file in files.files.iter() {
-        offset += LOCAL_HEADER_SIZE_MINUS_FILENAME + full_file_name_len(file, &files.directory_name);
+    for file in files.iter() {
+        offset += LOCAL_HEADER_SIZE_MINUS_FILENAME + file.name.len() as u64;
         offsets.push(offset);
         offset += file.size + DATA_DESCRIPTOR_SIZE;
     }
     offsets
 }
 
-fn central_directory_size(files: &FileSet) -> u64 {
+fn central_directory_size(files: &[UploadedFile]) -> u64 {
     let mut total = 0;
-    for file in files.files.iter() {
-        total += CENTRAL_DIRECTORY_HEADER_SIZE_MINUS_FILENAME + full_file_name_len(file, &files.directory_name);
+    for file in files.iter() {
+        total += CENTRAL_DIRECTORY_HEADER_SIZE_MINUS_FILENAME + file.name.len() as u64;
     }
     total
 }
 
-fn zipfile_size(files: &FileSet) -> u64 {
-    file_entries_size(files, None) + central_directory_size(files) + EOCD_TOTAL_SIZE
+fn zipfile_size(files: &[UploadedFile]) -> u64 {
+    file_entries_size(files) + central_directory_size(files) + EOCD_TOTAL_SIZE
 }
 
 fn fat_timestamp(time: OffsetDateTime) -> u32 {
@@ -125,7 +100,7 @@ impl UploadedFile {
     /// Returns the fields shared by the ZIP local file header and
     /// central directory file header - "Version needed to extract"
     /// through "Extra field length".
-    fn shared_header_fields(&self, directory_name: &Option<String>, hash: Option<u32>) -> Vec<u8> {
+    fn shared_header_fields(&self, hash: Option<u32>) -> Vec<u8> {
         let mut fields = vec![
             45, 0, // Minimum version required to extract: 4.5 for ZIP64
             0b00001000, // General purpose bit flag: bit 3 - size and CRC-32 in data descriptor
@@ -137,7 +112,7 @@ impl UploadedFile {
         append_value(&mut fields, hash.unwrap_or(0) as u64, 4);
         // Placeholders for compressed and uncompressed size in ZIP64 record, 4 bytes each
         append_ff(&mut fields, 8);
-        append_value(&mut fields, full_file_name_len(self, directory_name), 2);
+        append_value(&mut fields, self.name.len() as u64, 2);
         // Extra field length: 32 bytes for zip64, 9 bytes for timestamp
         fields.append(&mut vec![41, 0]);
         fields
@@ -167,34 +142,24 @@
         field
     }
 
-    fn full_name_bytes(&self, directory_name: &Option<String>) -> Vec<u8> {
-        let mut b = vec![];
-        if let Some(d) = directory_name {
-            b.append(&mut d.to_owned().into_bytes());
-            b.push(SLASH);
-        }
-        b.append(&mut self.name.clone().into_bytes());
-        b
-    }
-
-    fn local_header(&self, directory_name: &Option<String>, local_header_offset: u64) -> Vec<u8> {
+    fn local_header(&self, local_header_offset: u64) -> Vec<u8> {
         let mut header = vec![0x50, 0x4b, 0x03, 0x04]; // Local file header signature
-        header.append(&mut self.shared_header_fields(directory_name, None));
-        header.append(&mut self.full_name_bytes(directory_name));
+        header.append(&mut self.shared_header_fields(None));
+        header.append(&mut self.name.clone().into_bytes());
         header.append(&mut self.extra_field(local_header_offset));
         header
     }
 
-    fn central_directory_header(&self, directory_name: &Option<String>, local_header_offset: u64, hash: u32) -> Vec<u8> {
+    fn central_directory_header(&self, local_header_offset: u64, hash: u32) -> Vec<u8> {
         let mut header = vec![
             0x50, 0x4b, 0x01, 0x02, // Central directory file header signature
             45, 3, // Made by a Unix system supporting version 4.5
         ];
-        header.append(&mut self.shared_header_fields(directory_name, Some(hash)));
+        header.append(&mut self.shared_header_fields(Some(hash)));
         append_0(&mut header, 8); // Comment length, disk number, internal attributes, DOS external attributes
         append_value(&mut header, 0o100644, 2); // Unix external file attributes: -rw-r--r--
         append_ff(&mut header, 4); // Relative offset of local file header: placeholder, see ZIP64 data
-        header.append(&mut self.full_name_bytes(directory_name));
+        header.append(&mut self.name.clone().into_bytes());
         header.append(&mut self.extra_field(local_header_offset));
         header
     }
@@ -209,8 +174,8 @@
     }
 }
 
-fn end_of_central_directory(files: &FileSet) -> Vec<u8> {
-    let entries_size = file_entries_size(files, None);
+fn end_of_central_directory(files: &[UploadedFile]) -> Vec<u8> {
+    let entries_size = file_entries_size(files);
     let directory_size = central_directory_size(files);
 
     let mut eocd = vec![
@@ -223,10 +188,9 @@ fn end_of_central_directory(files: &FileSet) -> Vec<u8> {
         45, 0, // Minimum version 4.5 to extract
     ]);
     append_0(&mut eocd, 8); // Two 4-byte disk numbers, both 0
-
-    // Number of central directory records, on this disk and in total
-    append_value(&mut eocd, files.files.len() as u64, 8);
-    append_value(&mut eocd, files.files.len() as u64, 8);
+    // Number of central directory records, on this disk and in total
+    append_value(&mut eocd, files.len() as u64, 8);
+    append_value(&mut eocd, files.len() as u64, 8);
     append_value(&mut eocd, directory_size, 8);
     append_value(&mut eocd, entries_size, 8); // Offset of start of central directory
 
@@ -243,7 +207,7 @@
 }
 
 pub struct ZipGenerator<W: Write> {
-    files: FileSet,
+    files: Vec<UploadedFile>,
     file_index: usize,
     byte_index: u64,
     pending_metadata: Vec<u8>,
@@ -253,7 +217,7 @@ pub struct ZipGenerator<W: Write> {
 }
 
 impl<W: Write> ZipGenerator<W> {
-    pub fn new(files: FileSet, output: W) -> Self {
+    pub fn new(files: Vec<UploadedFile>, output: W) -> Self {
         let mut result = Self {
             files,
             file_index: 0,
@@ -275,39 +239,39 @@ impl<W: Write> ZipGenerator<W> {
         let hash = std::mem::replace(&mut self.hasher, Hasher::new()).finalize();
         self.hashes.push(hash);
         self.pending_metadata
-            .append(&mut self.files.files[self.file_index].data_descriptor(hash));
+            .append(&mut self.files[self.file_index].data_descriptor(hash));
         debug!(
             "Finishing file entry in zipfile: {}, hash {:x}",
-            self.files.files[self.file_index].name, hash
+            self.files[self.file_index].name, hash
         );
         self.file_index += 1;
         self.start_new_file();
     }
 
     fn start_new_file(&mut self) {
-        let mut offset = file_entries_size(&self.files, Some(self.file_index));
-        while self.file_index < self.files.files.len() && self.files.files[self.file_index].size == 0 {
+        let mut offset = file_entries_size(&self.files[..self.file_index]);
+        while self.file_index < self.files.len() && self.files[self.file_index].size == 0 {
             debug!(
                 "Empty file entry in zipfile: {}",
-                self.files.files[self.file_index].name
+                self.files[self.file_index].name
             );
             self.hashes.push(EMPTY_STRING_CRC32);
-            let mut local_header = self.files.files[self.file_index].local_header(&self.files.directory_name, offset);
+            let mut local_header = self.files[self.file_index].local_header(offset);
             let mut data_descriptor =
-                self.files.files[self.file_index].data_descriptor(EMPTY_STRING_CRC32);
+                self.files[self.file_index].data_descriptor(EMPTY_STRING_CRC32);
             offset += local_header.len() as u64 + data_descriptor.len() as u64;
             self.file_index += 1;
            self.pending_metadata.append(&mut local_header);
             self.pending_metadata.append(&mut data_descriptor);
         }
-        if self.file_index < self.files.files.len() {
+        if self.file_index < self.files.len() {
             debug!(
                 "Starting file entry in zipfile: {}",
-                self.files.files[self.file_index].name
+                self.files[self.file_index].name
             );
             self.byte_index = 0;
             self.pending_metadata
-                .append(&mut self.files.files[self.file_index].local_header(&self.files.directory_name, offset));
+                .append(&mut self.files[self.file_index].local_header(offset));
         } else {
             self.finish_zipfile();
         }
@@ -316,14 +280,14 @@
     fn finish_zipfile(&mut self) {
         debug!("Writing zipfile central directory");
         let mut offset = 0;
-        for (i, file) in self.files.files.iter().enumerate() {
+        for (i, file) in self.files.iter().enumerate() {
            debug!(
                 "Writing central directory entry: {}, hash {}",
                 file.name, self.hashes[i]
             );
             self.pending_metadata
-                .append(&mut file.central_directory_header(&self.files.directory_name, offset, self.hashes[i]));
-            offset += file_entry_size(file, &self.files.directory_name);
+                .append(&mut file.central_directory_header(offset, self.hashes[i]));
+            offset += file_entry_size(file);
         }
         debug!("Writing end of central directory");
         self.pending_metadata
@@ -344,10 +308,10 @@ impl<W: Write> Write for ZipGenerator<W> {
                 }
             }
         }
-        if self.file_index >= self.files.files.len() {
+        if self.file_index >= self.files.len() {
             return Ok(0);
         }
-        let bytes_remaining = self.files.files[self.file_index].size - self.byte_index;
+        let bytes_remaining = self.files[self.file_index].size - self.byte_index;
         if bytes_remaining < (buf.len() as u64) {
             buf = &buf[..bytes_remaining as usize];
         }
@@ -383,8 +347,7 @@ mod tests {
     fn test_no_files() {
         let mut output: Vec<u8> = vec![];
         {
-            let mut zipgen =
-                ZipGenerator::new(FileSet { files: vec![], directory_name: "test".to_owned() }, Box::new(std::io::Cursor::new(&mut output)));
+            let mut zipgen = ZipGenerator::new(vec![], Box::new(std::io::Cursor::new(&mut output)));
             zipgen.write_all(&[]).unwrap();
             zipgen.flush().unwrap();
         }

View file

@@ -7,7 +7,7 @@
 {% let formatted_total_size = bytesize::to_string(info.file.size.clone(), false).replace(" ", "") -%}
 {% match info.file.contents -%}
 {% when Some with (files) -%}
-{{ files.files.len() }} files, {{ formatted_total_size }} total
+{{ files.len() }} files, {{ formatted_total_size }} total
 {%- else -%}
 {{ formatted_total_size }}
 {%- endmatch %}, expires {{ info.file.expiry.format(DATE_DISPLAY_FORMAT).unwrap() }}
@@ -37,7 +37,7 @@
       <summary>Show file list</summary>
       <table><tbody>
         {% let offsets = info.offsets.as_ref().unwrap() %}
-        {% for f in files.files %}
+        {% for f in files %}
         <tr class="{% if offsets.get(loop.index0.clone()).unwrap().clone() > info.available %}unavailable{% endif %}">
           <td class="file_size">{{ bytesize::to_string(f.size.clone(), false).replace(" ", "") }}</td>
           <td class="file_name">{{ f.name }}</td>