WIP file drop server, no downloads yet
This commit is contained in:
commit
20da86132b
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
/target
|
1643
Cargo.lock
generated
Normal file
1643
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
30
Cargo.toml
Normal file
30
Cargo.toml
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
[package]
|
||||||
|
name = "transbeam"
|
||||||
|
version = "0.1.0"
|
||||||
|
authors = ["xenofem <xenofem@xeno.science>"]
|
||||||
|
edition = "2021"
|
||||||
|
license = "MIT"
|
||||||
|
|
||||||
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
actix = "0.13"
|
||||||
|
actix-files = "0.6.0"
|
||||||
|
actix-http = "3.0.4"
|
||||||
|
actix-web = "4.0.1"
|
||||||
|
actix-web-actors = "4.1.0"
|
||||||
|
bytes = "1.1.0"
|
||||||
|
crc32fast = "1.3.2"
|
||||||
|
env_logger = "0.9"
|
||||||
|
futures = "0.3"
|
||||||
|
log = "0.4"
|
||||||
|
rand = "0.8.5"
|
||||||
|
sanitise-file-name = "1.0.0"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
thiserror = "1"
|
||||||
|
time = "0.3.9"
|
||||||
|
tokio = { version = "1.17.0", features = ["full"] }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
zip = "0.6.2"
|
16
README.md
Normal file
16
README.md
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
# transbeam
|
||||||
|
|
||||||
|
A low-frills low-latency file drop server
|
||||||
|
|
||||||
|
## installation
|
||||||
|
|
||||||
|
## todo
|
||||||
|
|
||||||
|
- [ ] file uploading
|
||||||
|
- [ ] real-time file downloading
|
||||||
|
- [ ] upload progress bar
|
||||||
|
- [ ] uploader auth
|
||||||
|
- [ ] downloader auth
|
||||||
|
- [ ] sanitize filenames
|
||||||
|
- [ ] make sure upload errors are passed along to downloaders in a halfway reasonable way
|
||||||
|
- [ ] delete uploads after a while
|
769
src/download.rs
Normal file
769
src/download.rs
Normal file
|
@ -0,0 +1,769 @@
|
||||||
|
use std::{
|
||||||
|
fs::Metadata,
|
||||||
|
io,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
time::{SystemTime, UNIX_EPOCH},
|
||||||
|
};
|
||||||
|
|
||||||
|
use actix_web::{
|
||||||
|
body::{self, BoxBody, SizedStream},
|
||||||
|
dev::{
|
||||||
|
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory,
|
||||||
|
ServiceRequest, ServiceResponse,
|
||||||
|
},
|
||||||
|
http::{
|
||||||
|
header::{
|
||||||
|
self, Charset, ContentDisposition, ContentEncoding, DispositionParam,
|
||||||
|
DispositionType, ExtendedValue, HeaderValue,
|
||||||
|
},
|
||||||
|
StatusCode,
|
||||||
|
},
|
||||||
|
Error, HttpMessage, HttpRequest, HttpResponse, Responder,
|
||||||
|
};
|
||||||
|
use bitflags::bitflags;
|
||||||
|
use derive_more::{Deref, DerefMut};
|
||||||
|
use futures_core::future::LocalBoxFuture;
|
||||||
|
use mime::Mime;
|
||||||
|
use mime_guess::from_path;
|
||||||
|
|
||||||
|
use crate::{encoding::equiv_utf8_text, range::HttpRange};
|
||||||
|
|
||||||
|
bitflags! {
|
||||||
|
pub(crate) struct Flags: u8 {
|
||||||
|
const ETAG = 0b0000_0001;
|
||||||
|
const LAST_MD = 0b0000_0010;
|
||||||
|
const CONTENT_DISPOSITION = 0b0000_0100;
|
||||||
|
const PREFER_UTF8 = 0b0000_1000;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Flags {
|
||||||
|
fn default() -> Self {
|
||||||
|
Flags::from_bits_truncate(0b0000_1111)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A file with an associated name.
|
||||||
|
///
|
||||||
|
/// `NamedFile` can be registered as services:
|
||||||
|
/// ```
|
||||||
|
/// use actix_web::App;
|
||||||
|
/// use actix_files::NamedFile;
|
||||||
|
///
|
||||||
|
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
/// let file = NamedFile::open_async("./static/index.html").await?;
|
||||||
|
/// let app = App::new().service(file);
|
||||||
|
/// # Ok(())
|
||||||
|
/// # }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// They can also be returned from handlers:
|
||||||
|
/// ```
|
||||||
|
/// use actix_web::{Responder, get};
|
||||||
|
/// use actix_files::NamedFile;
|
||||||
|
///
|
||||||
|
/// #[get("/")]
|
||||||
|
/// async fn index() -> impl Responder {
|
||||||
|
/// NamedFile::open_async("./static/index.html").await
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Debug, Deref, DerefMut)]
|
||||||
|
pub struct NamedFile {
|
||||||
|
#[deref]
|
||||||
|
#[deref_mut]
|
||||||
|
file: File,
|
||||||
|
path: PathBuf,
|
||||||
|
modified: Option<SystemTime>,
|
||||||
|
pub(crate) md: Metadata,
|
||||||
|
pub(crate) flags: Flags,
|
||||||
|
pub(crate) status_code: StatusCode,
|
||||||
|
pub(crate) content_type: Mime,
|
||||||
|
pub(crate) content_disposition: ContentDisposition,
|
||||||
|
pub(crate) encoding: Option<ContentEncoding>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) use std::fs::File;
|
||||||
|
|
||||||
|
use super::chunked;
|
||||||
|
|
||||||
|
impl NamedFile {
|
||||||
|
/// Creates an instance from a previously opened file.
|
||||||
|
///
|
||||||
|
/// The given `path` need not exist and is only used to determine the `ContentType` and
|
||||||
|
/// `ContentDisposition` headers.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
/// ```ignore
|
||||||
|
/// use std::{
|
||||||
|
/// io::{self, Write as _},
|
||||||
|
/// env,
|
||||||
|
/// fs::File
|
||||||
|
/// };
|
||||||
|
/// use actix_files::NamedFile;
|
||||||
|
///
|
||||||
|
/// let mut file = File::create("foo.txt")?;
|
||||||
|
/// file.write_all(b"Hello, world!")?;
|
||||||
|
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
|
||||||
|
/// # std::fs::remove_file("foo.txt");
|
||||||
|
/// Ok(())
|
||||||
|
/// ```
|
||||||
|
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
|
||||||
|
let path = path.as_ref().to_path_buf();
|
||||||
|
|
||||||
|
// Get the name of the file and use it to construct default Content-Type
|
||||||
|
// and Content-Disposition values
|
||||||
|
let (content_type, content_disposition) = {
|
||||||
|
let filename = match path.file_name() {
|
||||||
|
Some(name) => name.to_string_lossy(),
|
||||||
|
None => {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidInput,
|
||||||
|
"Provided path has no filename",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let ct = from_path(&path).first_or_octet_stream();
|
||||||
|
|
||||||
|
let disposition = match ct.type_() {
|
||||||
|
mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline,
|
||||||
|
mime::APPLICATION => match ct.subtype() {
|
||||||
|
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
|
||||||
|
name if name == "wasm" => DispositionType::Inline,
|
||||||
|
_ => DispositionType::Attachment,
|
||||||
|
},
|
||||||
|
_ => DispositionType::Attachment,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut parameters =
|
||||||
|
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
|
||||||
|
|
||||||
|
if !filename.is_ascii() {
|
||||||
|
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
|
||||||
|
charset: Charset::Ext(String::from("UTF-8")),
|
||||||
|
language_tag: None,
|
||||||
|
value: filename.into_owned().into_bytes(),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
let cd = ContentDisposition {
|
||||||
|
disposition,
|
||||||
|
parameters,
|
||||||
|
};
|
||||||
|
|
||||||
|
(ct, cd)
|
||||||
|
};
|
||||||
|
|
||||||
|
let md = {
|
||||||
|
{
|
||||||
|
file.metadata()?
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let modified = md.modified().ok();
|
||||||
|
let encoding = None;
|
||||||
|
|
||||||
|
Ok(NamedFile {
|
||||||
|
path,
|
||||||
|
file,
|
||||||
|
content_type,
|
||||||
|
content_disposition,
|
||||||
|
md,
|
||||||
|
modified,
|
||||||
|
encoding,
|
||||||
|
status_code: StatusCode::OK,
|
||||||
|
flags: Flags::default(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts to open a file in read-only mode.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
/// ```
|
||||||
|
/// use actix_files::NamedFile;
|
||||||
|
/// let file = NamedFile::open("foo.txt");
|
||||||
|
/// ```
|
||||||
|
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
|
||||||
|
let file = File::open(&path)?;
|
||||||
|
Self::from_file(file, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts to open a file asynchronously in read-only mode.
|
||||||
|
///
|
||||||
|
/// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it
|
||||||
|
/// will behave just like `open`.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
/// ```
|
||||||
|
/// use actix_files::NamedFile;
|
||||||
|
/// # async fn open() {
|
||||||
|
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
|
||||||
|
/// # }
|
||||||
|
/// ```
|
||||||
|
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
|
||||||
|
let file = {
|
||||||
|
{
|
||||||
|
File::open(&path)?
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Self::from_file(file, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns reference to the underlying file object.
|
||||||
|
#[inline]
|
||||||
|
pub fn file(&self) -> &File {
|
||||||
|
&self.file
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the filesystem path to this file.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
/// ```
|
||||||
|
/// # use std::io;
|
||||||
|
/// use actix_files::NamedFile;
|
||||||
|
///
|
||||||
|
/// # async fn path() -> io::Result<()> {
|
||||||
|
/// let file = NamedFile::open_async("test.txt").await?;
|
||||||
|
/// assert_eq!(file.path().as_os_str(), "foo.txt");
|
||||||
|
/// # Ok(())
|
||||||
|
/// # }
|
||||||
|
/// ```
|
||||||
|
#[inline]
|
||||||
|
pub fn path(&self) -> &Path {
|
||||||
|
self.path.as_path()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the time the file was last modified.
|
||||||
|
///
|
||||||
|
/// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`].
|
||||||
|
/// Therefore, it is usually safe to unwrap this.
|
||||||
|
#[inline]
|
||||||
|
pub fn modified(&self) -> Option<SystemTime> {
|
||||||
|
self.modified
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the filesystem metadata associated with this file.
|
||||||
|
#[inline]
|
||||||
|
pub fn metadata(&self) -> &Metadata {
|
||||||
|
&self.md
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the `Content-Type` header that will be used when serving this file.
|
||||||
|
#[inline]
|
||||||
|
pub fn content_type(&self) -> &Mime {
|
||||||
|
&self.content_type
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the `Content-Disposition` that will be used when serving this file.
|
||||||
|
#[inline]
|
||||||
|
pub fn content_disposition(&self) -> &ContentDisposition {
|
||||||
|
&self.content_disposition
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the `Content-Encoding` that will be used when serving this file.
|
||||||
|
///
|
||||||
|
/// A return value of `None` indicates that the content is not already using a compressed
|
||||||
|
/// representation and may be subject to compression downstream.
|
||||||
|
#[inline]
|
||||||
|
pub fn content_encoding(&self) -> Option<ContentEncoding> {
|
||||||
|
self.encoding
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set response status code.
|
||||||
|
#[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")]
|
||||||
|
pub fn set_status_code(mut self, status: StatusCode) -> Self {
|
||||||
|
self.status_code = status;
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets the `Content-Type` header that will be used when serving this file. By default the
|
||||||
|
/// `Content-Type` is inferred from the filename extension.
|
||||||
|
#[inline]
|
||||||
|
pub fn set_content_type(mut self, mime_type: Mime) -> Self {
|
||||||
|
self.content_type = mime_type;
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the Content-Disposition for serving this file. This allows changing the
|
||||||
|
/// `inline/attachment` disposition as well as the filename sent to the peer.
|
||||||
|
///
|
||||||
|
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and
|
||||||
|
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the
|
||||||
|
/// filename is taken from the path provided in the `open` method after converting it to UTF-8
|
||||||
|
/// (using `to_string_lossy`).
|
||||||
|
#[inline]
|
||||||
|
pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self {
|
||||||
|
self.content_disposition = cd;
|
||||||
|
self.flags.insert(Flags::CONTENT_DISPOSITION);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disables `Content-Disposition` header.
|
||||||
|
///
|
||||||
|
/// By default, the `Content-Disposition` header is sent.
|
||||||
|
#[inline]
|
||||||
|
pub fn disable_content_disposition(mut self) -> Self {
|
||||||
|
self.flags.remove(Flags::CONTENT_DISPOSITION);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets content encoding for this file.
|
||||||
|
///
|
||||||
|
/// This prevents the `Compress` middleware from modifying the file contents and signals to
|
||||||
|
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
|
||||||
|
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
|
||||||
|
#[inline]
|
||||||
|
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
|
||||||
|
self.encoding = Some(enc);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Specifies whether to return `ETag` header in response.
|
||||||
|
///
|
||||||
|
/// Default is true.
|
||||||
|
#[inline]
|
||||||
|
pub fn use_etag(mut self, value: bool) -> Self {
|
||||||
|
self.flags.set(Flags::ETAG, value);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Specifies whether to return `Last-Modified` header in response.
|
||||||
|
///
|
||||||
|
/// Default is true.
|
||||||
|
#[inline]
|
||||||
|
pub fn use_last_modified(mut self, value: bool) -> Self {
|
||||||
|
self.flags.set(Flags::LAST_MD, value);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Specifies whether text responses should signal a UTF-8 encoding.
|
||||||
|
///
|
||||||
|
/// Default is false (but will default to true in a future version).
|
||||||
|
#[inline]
|
||||||
|
pub fn prefer_utf8(mut self, value: bool) -> Self {
|
||||||
|
self.flags.set(Flags::PREFER_UTF8, value);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates an `ETag` in a format is similar to Apache's.
|
||||||
|
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
|
||||||
|
self.modified.as_ref().map(|mtime| {
|
||||||
|
let ino = {
|
||||||
|
#[cfg(unix)]
|
||||||
|
{
|
||||||
|
#[cfg(unix)]
|
||||||
|
use std::os::unix::fs::MetadataExt as _;
|
||||||
|
|
||||||
|
self.md.ino()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
{
|
||||||
|
0
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let dur = mtime
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.expect("modification time must be after epoch");
|
||||||
|
|
||||||
|
header::EntityTag::new_strong(format!(
|
||||||
|
"{:x}:{:x}:{:x}:{:x}",
|
||||||
|
ino,
|
||||||
|
self.md.len(),
|
||||||
|
dur.as_secs(),
|
||||||
|
dur.subsec_nanos()
|
||||||
|
))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn last_modified(&self) -> Option<header::HttpDate> {
|
||||||
|
self.modified.map(|mtime| mtime.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates an `HttpResponse` with file as a streaming body.
|
||||||
|
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> {
|
||||||
|
if self.status_code != StatusCode::OK {
|
||||||
|
let mut res = HttpResponse::build(self.status_code);
|
||||||
|
|
||||||
|
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
|
||||||
|
equiv_utf8_text(self.content_type.clone())
|
||||||
|
} else {
|
||||||
|
self.content_type
|
||||||
|
};
|
||||||
|
|
||||||
|
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
|
||||||
|
|
||||||
|
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
|
||||||
|
res.insert_header((
|
||||||
|
header::CONTENT_DISPOSITION,
|
||||||
|
self.content_disposition.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(current_encoding) = self.encoding {
|
||||||
|
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
|
||||||
|
}
|
||||||
|
|
||||||
|
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file);
|
||||||
|
|
||||||
|
return res.streaming(reader);
|
||||||
|
}
|
||||||
|
|
||||||
|
let etag = if self.flags.contains(Flags::ETAG) {
|
||||||
|
self.etag()
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let last_modified = if self.flags.contains(Flags::LAST_MD) {
|
||||||
|
self.last_modified()
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
// check preconditions
|
||||||
|
let precondition_failed = if !any_match(etag.as_ref(), req) {
|
||||||
|
true
|
||||||
|
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
|
||||||
|
(last_modified, req.get_header())
|
||||||
|
{
|
||||||
|
let t1: SystemTime = (*m).into();
|
||||||
|
let t2: SystemTime = (*since).into();
|
||||||
|
|
||||||
|
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
|
||||||
|
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(),
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
};
|
||||||
|
|
||||||
|
// check last modified
|
||||||
|
let not_modified = if !none_match(etag.as_ref(), req) {
|
||||||
|
true
|
||||||
|
} else if req.headers().contains_key(header::IF_NONE_MATCH) {
|
||||||
|
false
|
||||||
|
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
|
||||||
|
(last_modified, req.get_header())
|
||||||
|
{
|
||||||
|
let t1: SystemTime = (*m).into();
|
||||||
|
let t2: SystemTime = (*since).into();
|
||||||
|
|
||||||
|
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
|
||||||
|
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(),
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut res = HttpResponse::build(self.status_code);
|
||||||
|
|
||||||
|
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
|
||||||
|
equiv_utf8_text(self.content_type.clone())
|
||||||
|
} else {
|
||||||
|
self.content_type
|
||||||
|
};
|
||||||
|
|
||||||
|
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
|
||||||
|
|
||||||
|
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
|
||||||
|
res.insert_header((
|
||||||
|
header::CONTENT_DISPOSITION,
|
||||||
|
self.content_disposition.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(current_encoding) = self.encoding {
|
||||||
|
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(lm) = last_modified {
|
||||||
|
res.insert_header((header::LAST_MODIFIED, lm.to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(etag) = etag {
|
||||||
|
res.insert_header((header::ETAG, etag.to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
res.insert_header((header::ACCEPT_RANGES, "bytes"));
|
||||||
|
|
||||||
|
let mut length = self.md.len();
|
||||||
|
let mut offset = 0;
|
||||||
|
|
||||||
|
// check for range header
|
||||||
|
if let Some(ranges) = req.headers().get(header::RANGE) {
|
||||||
|
if let Ok(ranges_header) = ranges.to_str() {
|
||||||
|
if let Ok(ranges) = HttpRange::parse(ranges_header, length) {
|
||||||
|
length = ranges[0].length;
|
||||||
|
offset = ranges[0].start;
|
||||||
|
|
||||||
|
// don't allow compression middleware to modify partial content
|
||||||
|
res.insert_header((
|
||||||
|
header::CONTENT_ENCODING,
|
||||||
|
HeaderValue::from_static("identity"),
|
||||||
|
));
|
||||||
|
|
||||||
|
res.insert_header((
|
||||||
|
header::CONTENT_RANGE,
|
||||||
|
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()),
|
||||||
|
));
|
||||||
|
} else {
|
||||||
|
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length)));
|
||||||
|
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
return res.status(StatusCode::BAD_REQUEST).finish();
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
if precondition_failed {
|
||||||
|
return res.status(StatusCode::PRECONDITION_FAILED).finish();
|
||||||
|
} else if not_modified {
|
||||||
|
return res
|
||||||
|
.status(StatusCode::NOT_MODIFIED)
|
||||||
|
.body(body::None::new())
|
||||||
|
.map_into_boxed_body();
|
||||||
|
}
|
||||||
|
|
||||||
|
let reader = chunked::new_chunked_read(length, offset, self.file);
|
||||||
|
|
||||||
|
if offset != 0 || length != self.md.len() {
|
||||||
|
res.status(StatusCode::PARTIAL_CONTENT);
|
||||||
|
}
|
||||||
|
|
||||||
|
res.body(SizedStream::new(length, reader))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if `req` has no `If-Match` header or one which matches `etag`.
|
||||||
|
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||||
|
match req.get_header::<header::IfMatch>() {
|
||||||
|
None | Some(header::IfMatch::Any) => true,
|
||||||
|
|
||||||
|
Some(header::IfMatch::Items(ref items)) => {
|
||||||
|
if let Some(some_etag) = etag {
|
||||||
|
for item in items {
|
||||||
|
if item.strong_eq(some_etag) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if `req` doesn't have an `If-None-Match` header matching `req`.
|
||||||
|
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||||
|
match req.get_header::<header::IfNoneMatch>() {
|
||||||
|
Some(header::IfNoneMatch::Any) => false,
|
||||||
|
|
||||||
|
Some(header::IfNoneMatch::Items(ref items)) => {
|
||||||
|
if let Some(some_etag) = etag {
|
||||||
|
for item in items {
|
||||||
|
if item.weak_eq(some_etag) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
None => true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Responder for NamedFile {
|
||||||
|
type Body = BoxBody;
|
||||||
|
|
||||||
|
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
|
||||||
|
self.into_response(req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ServiceFactory<ServiceRequest> for NamedFile {
|
||||||
|
type Response = ServiceResponse;
|
||||||
|
type Error = Error;
|
||||||
|
type Config = ();
|
||||||
|
type Service = NamedFileService;
|
||||||
|
type InitError = ();
|
||||||
|
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||||
|
|
||||||
|
fn new_service(&self, _: ()) -> Self::Future {
|
||||||
|
let service = NamedFileService {
|
||||||
|
path: self.path.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Box::pin(async move { Ok(service) })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[doc(hidden)]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct NamedFileService {
|
||||||
|
path: PathBuf,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Service<ServiceRequest> for NamedFileService {
|
||||||
|
type Response = ServiceResponse;
|
||||||
|
type Error = Error;
|
||||||
|
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||||
|
|
||||||
|
dev::always_ready!();
|
||||||
|
|
||||||
|
fn call(&self, req: ServiceRequest) -> Self::Future {
|
||||||
|
let (req, _) = req.into_parts();
|
||||||
|
|
||||||
|
let path = self.path.clone();
|
||||||
|
Box::pin(async move {
|
||||||
|
let file = NamedFile::open_async(path).await?;
|
||||||
|
let res = file.into_response(&req);
|
||||||
|
Ok(ServiceResponse::new(req, res))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl HttpServiceFactory for NamedFile {
|
||||||
|
fn register(self, config: &mut AppService) {
|
||||||
|
config.register_service(
|
||||||
|
ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()),
|
||||||
|
None,
|
||||||
|
self,
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
cmp, fmt,
|
||||||
|
future::Future,
|
||||||
|
io,
|
||||||
|
pin::Pin,
|
||||||
|
task::{Context, Poll},
|
||||||
|
};
|
||||||
|
|
||||||
|
use actix_web::{error::Error, web::Bytes};
|
||||||
|
use futures_core::{ready, Stream};
|
||||||
|
use pin_project_lite::pin_project;
|
||||||
|
|
||||||
|
use super::named::File;
|
||||||
|
|
||||||
|
pin_project! {
|
||||||
|
/// Adapter to read a `std::file::File` in chunks.
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub struct ChunkedReadFile<F, Fut> {
|
||||||
|
size: u64,
|
||||||
|
offset: u64,
|
||||||
|
#[pin]
|
||||||
|
state: ChunkedReadFileState<Fut>,
|
||||||
|
counter: u64,
|
||||||
|
callback: F,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pin_project! {
|
||||||
|
#[project = ChunkedReadFileStateProj]
|
||||||
|
#[project_replace = ChunkedReadFileStateProjReplace]
|
||||||
|
enum ChunkedReadFileState<Fut> {
|
||||||
|
File { file: Option<File>, },
|
||||||
|
Future { #[pin] fut: Fut },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
f.write_str("ChunkedReadFile")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn new_chunked_read(
|
||||||
|
size: u64,
|
||||||
|
offset: u64,
|
||||||
|
file: File,
|
||||||
|
) -> impl Stream<Item = Result<Bytes, Error>> {
|
||||||
|
ChunkedReadFile {
|
||||||
|
size,
|
||||||
|
offset,
|
||||||
|
state: ChunkedReadFileState::File { file: Some(file) },
|
||||||
|
counter: 0,
|
||||||
|
callback: chunked_read_file_callback,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn chunked_read_file_callback(
|
||||||
|
mut file: File,
|
||||||
|
offset: u64,
|
||||||
|
max_bytes: usize,
|
||||||
|
) -> Result<(File, Bytes), Error> {
|
||||||
|
use io::{Read as _, Seek as _};
|
||||||
|
|
||||||
|
let res = actix_web::web::block(move || {
|
||||||
|
let mut buf = Vec::with_capacity(max_bytes);
|
||||||
|
|
||||||
|
file.seek(io::SeekFrom::Start(offset))?;
|
||||||
|
|
||||||
|
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
|
||||||
|
|
||||||
|
if n_bytes == 0 {
|
||||||
|
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
|
||||||
|
} else {
|
||||||
|
Ok((file, Bytes::from(buf)))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.await??;
|
||||||
|
|
||||||
|
Ok(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
|
||||||
|
where
|
||||||
|
F: Fn(File, u64, usize) -> Fut,
|
||||||
|
Fut: Future<Output = Result<(File, Bytes), Error>>,
|
||||||
|
{
|
||||||
|
type Item = Result<Bytes, Error>;
|
||||||
|
|
||||||
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||||
|
let mut this = self.as_mut().project();
|
||||||
|
match this.state.as_mut().project() {
|
||||||
|
ChunkedReadFileStateProj::File { file } => {
|
||||||
|
let size = *this.size;
|
||||||
|
let offset = *this.offset;
|
||||||
|
let counter = *this.counter;
|
||||||
|
|
||||||
|
if size == counter {
|
||||||
|
Poll::Ready(None)
|
||||||
|
} else {
|
||||||
|
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
|
||||||
|
|
||||||
|
let file = file
|
||||||
|
.take()
|
||||||
|
.expect("ChunkedReadFile polled after completion");
|
||||||
|
|
||||||
|
let fut = (this.callback)(file, offset, max_bytes);
|
||||||
|
|
||||||
|
this.state
|
||||||
|
.project_replace(ChunkedReadFileState::Future { fut });
|
||||||
|
|
||||||
|
self.poll_next(cx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ChunkedReadFileStateProj::Future { fut } => {
|
||||||
|
let (file, bytes) = ready!(fut.poll(cx))?;
|
||||||
|
|
||||||
|
this.state
|
||||||
|
.project_replace(ChunkedReadFileState::File { file: Some(file) });
|
||||||
|
|
||||||
|
*this.offset += bytes.len() as u64;
|
||||||
|
*this.counter += bytes.len() as u64;
|
||||||
|
|
||||||
|
Poll::Ready(Some(Ok(bytes)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
45
src/file.rs
Normal file
45
src/file.rs
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
use std::{fs::File, task::Waker, io::Write, path::PathBuf};
|
||||||
|
|
||||||
|
pub trait LiveWriter: Write {
|
||||||
|
fn add_waker(&mut self, waker: Waker);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A simple wrapper for a file that can be read while we're still appending data
|
||||||
|
pub struct LiveFileWriter {
|
||||||
|
file: File,
|
||||||
|
/// Wake handles for contexts that are waiting for us to write more
|
||||||
|
wakers: Vec<Waker>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LiveFileWriter {
|
||||||
|
pub fn new(path: &PathBuf) -> std::io::Result<Self> {
|
||||||
|
Ok(Self {
|
||||||
|
file: File::options().write(true).create_new(true).open(path)?,
|
||||||
|
wakers: Vec::new(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LiveWriter for LiveFileWriter {
|
||||||
|
fn add_waker(&mut self, waker: Waker) {
|
||||||
|
self.wakers.push(waker);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Write for LiveFileWriter {
|
||||||
|
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||||
|
let result = self.file.write(buf);
|
||||||
|
if let Ok(n) = result {
|
||||||
|
if n > 0 {
|
||||||
|
for waker in self.wakers.drain(..) {
|
||||||
|
waker.wake();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
fn flush(&mut self) -> std::io::Result<()> {
|
||||||
|
self.file.flush()
|
||||||
|
}
|
||||||
|
}
|
82
src/main.rs
Normal file
82
src/main.rs
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
//mod download;
|
||||||
|
mod file;
|
||||||
|
mod upload;
|
||||||
|
mod zip;
|
||||||
|
|
||||||
|
use std::{collections::HashMap, task::Waker, sync::{mpsc::Sender, RwLock}, path::PathBuf};
|
||||||
|
|
||||||
|
use actix::Addr;
|
||||||
|
use actix_web::{
|
||||||
|
get, middleware::Logger, web, App, HttpResponse, HttpServer,
|
||||||
|
Responder, HttpRequest,
|
||||||
|
};
|
||||||
|
use actix_web_actors::ws;
|
||||||
|
use time::OffsetDateTime;
|
||||||
|
|
||||||
|
pub struct UploadedFile {
|
||||||
|
name: String,
|
||||||
|
size: usize,
|
||||||
|
modtime: OffsetDateTime,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UploadedFile {
|
||||||
|
fn new(name: &str, size: usize, modtime: OffsetDateTime) -> Self {
|
||||||
|
Self {
|
||||||
|
name: sanitise_file_name::sanitise(name),
|
||||||
|
size,
|
||||||
|
modtime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
pub struct DownloadableFile {
|
||||||
|
name: String,
|
||||||
|
size: usize,
|
||||||
|
modtime: OffsetDateTime,
|
||||||
|
uploader: Option<Addr<upload::Uploader>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
type AppData = web::Data<RwLock<HashMap<String, DownloadableFile>>>;
|
||||||
|
|
||||||
|
fn storage_dir() -> PathBuf {
|
||||||
|
PathBuf::from(std::env::var("STORAGE_DIR").unwrap_or_else(|_| String::from("storage")))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn app_name() -> String {
|
||||||
|
std::env::var("APP_NAME").unwrap_or_else(|_| String::from("transbeam"))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/upload")]
|
||||||
|
async fn upload_socket(
|
||||||
|
req: HttpRequest,
|
||||||
|
stream: web::Payload,
|
||||||
|
data: AppData,
|
||||||
|
) -> impl Responder {
|
||||||
|
ws::start(
|
||||||
|
upload::Uploader::new(data),
|
||||||
|
&req,
|
||||||
|
stream
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[actix_web::main]
|
||||||
|
async fn main() -> std::io::Result<()> {
|
||||||
|
env_logger::init();
|
||||||
|
|
||||||
|
let ip = "0.0.0.0:3000";
|
||||||
|
|
||||||
|
let data: AppData = web::Data::new(RwLock::new(HashMap::new()));
|
||||||
|
|
||||||
|
HttpServer::new(move || {
|
||||||
|
App::new()
|
||||||
|
.app_data(data.clone())
|
||||||
|
.wrap(Logger::default())
|
||||||
|
.service(upload_socket)
|
||||||
|
.service(actix_files::Files::new("/", "./static").index_file("index.html"))
|
||||||
|
})
|
||||||
|
.bind(ip)?
|
||||||
|
.run()
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
228
src/upload.rs
Normal file
228
src/upload.rs
Normal file
|
@ -0,0 +1,228 @@
|
||||||
|
use std::{collections::HashSet, fmt::Display, io::Write};
|
||||||
|
|
||||||
|
use actix::{Actor, StreamHandler, ActorContext, AsyncContext};
|
||||||
|
use actix_http::ws::{Item, CloseReason};
|
||||||
|
use actix_web_actors::ws::{self, CloseCode};
|
||||||
|
use log::{error, debug, info, trace};
|
||||||
|
use rand::distributions::{Alphanumeric, DistString};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use time::OffsetDateTime;
|
||||||
|
|
||||||
|
use crate::{UploadedFile, DownloadableFile, file::LiveWriter};
|
||||||
|
|
||||||
|
// Timestamp appended to the generated zipfile's download name,
// e.g. "2022-04-22-153000".
const FILENAME_DATE_FORMAT: &[time::format_description::FormatItem] =
    time::macros::format_description!("[year]-[month]-[day]-[hour][minute][second]");
|
||||||
|
|
||||||
|
/// Everything that can go wrong while servicing an upload websocket.
/// Each variant maps to a websocket close code via `Error::close_code`,
/// and its `#[error]` message is sent to the client as the close reason.
#[derive(thiserror::Error, Debug)]
enum Error {
    #[error("Failed to parse file metadata")]
    Parse(#[from] serde_json::Error),
    #[error("Error writing to stored file")]
    Storage(#[from] std::io::Error),
    #[error("Time formatting error")]
    TimeFormat(#[from] time::error::Format),
    #[error("Lock on app state is poisoned")]
    LockPoisoned,
    #[error("Duplicate filename could not be deduplicated")]
    DuplicateFilename,
    #[error("This message type was not expected at this stage")]
    UnexpectedMessageType,
    #[error("Metadata contained an empty list of files")]
    NoFiles,
    #[error("Websocket was closed by client before completing transfer")]
    ClosedEarly(Option<CloseReason>),
    #[error("Client sent more data than they were supposed to")]
    TooMuchData,
}
|
||||||
|
|
||||||
|
impl Error {
|
||||||
|
fn close_code(&self) -> CloseCode {
|
||||||
|
match self {
|
||||||
|
Self::Parse(_) => CloseCode::Invalid,
|
||||||
|
Self::Storage(_) => CloseCode::Error,
|
||||||
|
Self::TimeFormat(_) => CloseCode::Error,
|
||||||
|
Self::LockPoisoned => CloseCode::Error,
|
||||||
|
Self::DuplicateFilename => CloseCode::Policy,
|
||||||
|
Self::UnexpectedMessageType => CloseCode::Invalid,
|
||||||
|
Self::NoFiles => CloseCode::Policy,
|
||||||
|
Self::ClosedEarly(_) => CloseCode::Invalid,
|
||||||
|
Self::TooMuchData => CloseCode::Invalid,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Websocket actor handling one upload session: first a JSON metadata
/// text frame, then binary file data frames, acknowledged one at a time.
pub struct Uploader {
    // Destination for uploaded bytes; None until metadata is received.
    // For multi-file uploads this is a ZipGenerator wrapping the file writer.
    writer: Option<Box<dyn LiveWriter>>,
    app_data: super::AppData,
    // Total bytes of file content still expected from the client.
    bytes_remaining: usize,
}
|
||||||
|
|
||||||
|
impl Uploader {
    /// Create an uploader with no open writer; `writer` and
    /// `bytes_remaining` are populated once the metadata frame arrives.
    pub fn new(app_data: super::AppData) -> Self {
        Self {
            writer: None,
            app_data,
            bytes_remaining: 0,
        }
    }
}
|
||||||
|
|
||||||
|
// Uploader runs as a websocket actor; all the work happens in its
// StreamHandler implementation below.
impl Actor for Uploader {
    type Context = ws::WebsocketContext<Self>;
}
|
||||||
|
|
||||||
|
/// File metadata exactly as sent by the client, before sanitisation.
#[derive(Debug, Deserialize)]
struct RawUploadedFile {
    name: String,
    size: usize,
    // Milliseconds since the Unix epoch (JS `File.lastModified`, as sent
    // by static/upload.js).
    modtime: i64,
}
|
||||||
|
|
||||||
|
impl RawUploadedFile {
    /// Convert to an `UploadedFile`: sanitises the name and converts the
    /// millisecond timestamp to seconds, falling back to the current time
    /// if the timestamp is out of `OffsetDateTime`'s representable range.
    fn process(&self) -> UploadedFile {
        UploadedFile::new(
            &self.name,
            self.size,
            OffsetDateTime::from_unix_timestamp(self.modtime / 1000)
                .unwrap_or_else(|_| OffsetDateTime::now_utc()),
        )
    }
}
|
||||||
|
|
||||||
|
impl StreamHandler<Result<ws::Message, ws::ProtocolError>> for Uploader {
|
||||||
|
fn handle(&mut self, msg: Result<ws::Message, ws::ProtocolError>, ctx: &mut Self::Context) {
|
||||||
|
let msg = match msg {
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Websocket error: {:?}", e);
|
||||||
|
ctx.stop();
|
||||||
|
return;
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
match self.handle_message(msg, ctx) {
|
||||||
|
Err(e) => {
|
||||||
|
error!("{:?}", e);
|
||||||
|
ctx.close(Some(ws::CloseReason {
|
||||||
|
code: e.close_code(),
|
||||||
|
description: Some(e.to_string()),
|
||||||
|
}));
|
||||||
|
ctx.stop();
|
||||||
|
}
|
||||||
|
Ok(true) => {
|
||||||
|
info!("Finished uploading data");
|
||||||
|
self.writer.as_mut().map(|w| w.flush());
|
||||||
|
ctx.close(Some(ws::CloseReason {
|
||||||
|
code: CloseCode::Normal,
|
||||||
|
description: None,
|
||||||
|
}));
|
||||||
|
// self.app_data.write().unwrap().entry(
|
||||||
|
ctx.stop();
|
||||||
|
}
|
||||||
|
_ => ()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tell the client we are ready for its next message; the client sends
/// at most one chunk per "ack" (see static/upload.js).
fn ack(ctx: &mut <Uploader as Actor>::Context) {
    ctx.text("ack");
}
|
||||||
|
|
||||||
|
impl Uploader {
    /// Core upload state machine for a single websocket message.
    ///
    /// Returns `Ok(true)` when the upload is complete (all declared bytes
    /// received, or the client closed after sending everything),
    /// `Ok(false)` to continue, or an `Error` that the caller turns into
    /// a close frame.
    ///
    /// Protocol: exactly one Text frame with a JSON array of file
    /// metadata must arrive first; after that, only Binary/Continuation
    /// frames carrying file content are accepted, in file order.
    fn handle_message(&mut self, msg: ws::Message, ctx: &mut <Self as Actor>::Context) -> Result<bool, Error>{
        trace!("Websocket message: {:?}", msg);
        match msg {
            ws::Message::Text(text) => {
                // A writer already existing means metadata was already
                // received; a second Text frame is a protocol violation.
                if self.writer.is_some() {
                    return Err(Error::UnexpectedMessageType)
                }
                let raw_files: Vec<RawUploadedFile> = serde_json::from_slice(text.as_bytes())?;
                info!("Received file list: {} files", raw_files.len());
                debug!("{:?}", raw_files);
                let mut filenames: HashSet<String> = HashSet::new();
                let mut files = Vec::new();
                for raw_file in raw_files.iter() {
                    let mut file = raw_file.process();
                    // Deduplicate post-sanitisation name collisions by
                    // prefixing underscores; give up if the name is
                    // already at the sanitiser's length limit.
                    while filenames.contains(&file.name) {
                        info!("Duplicate file name: {}", file.name);
                        if file.name.len() >= sanitise_file_name::Options::DEFAULT.length_limit {
                            return Err(Error::DuplicateFilename);
                        }
                        file.name.insert(0, '_');
                    }
                    filenames.insert(file.name.clone());
                    self.bytes_remaining += file.size;
                    files.push(file);
                }
                if files.is_empty() {
                    return Err(Error::NoFiles);
                }
                // Random 8-character alphanumeric key; also the download URL key.
                let storage_filename = Alphanumeric.sample_string(&mut rand::thread_rng(), 8);
                let storage_path = super::storage_dir().join(storage_filename.clone());
                info!("storing to: {:?}", storage_path);
                let writer = super::file::LiveFileWriter::new(&storage_path)?;
                if files.len() > 1 {
                    // Multiple files: stream them into a single zipfile
                    // whose total size is known up front.
                    info!("Wrapping in zipfile generator");
                    let now = OffsetDateTime::now_utc();
                    let writer = super::zip::ZipGenerator::new(files, Box::new(writer));
                    let size = writer.total_size();
                    self.writer = Some(Box::new(writer));
                    let download_filename = super::app_name()
                        + &now.format(FILENAME_DATE_FORMAT)?
                        + ".zip";
                    let modtime = now;
                    self.app_data.write().map_err(|_| Error::LockPoisoned)?.insert(storage_filename, DownloadableFile {
                        name: download_filename,
                        size,
                        modtime,
                        uploader: Some(ctx.address()),
                    });
                } else {
                    // Single file: store it raw under its own name/metadata.
                    self.writer = Some(Box::new(writer));
                    self.app_data.write().map_err(|_| Error::LockPoisoned)?.insert(storage_filename, DownloadableFile {
                        name: files[0].name.clone(),
                        size: files[0].size,
                        modtime: files[0].modtime,
                        uploader: Some(ctx.address()),
                    });
                }
                ack(ctx);
            }
            // All binary frame shapes carry file content. NOTE(review):
            // Item::FirstText is not matched here and falls to the `_`
            // arm — presumably intentional since data is always binary.
            ws::Message::Binary(data)
                | ws::Message::Continuation(Item::FirstBinary(data))
                | ws::Message::Continuation(Item::Continue(data))
                | ws::Message::Continuation(Item::Last(data)) =>
            {
                if let Some(ref mut writer) = self.writer {
                    // Never accept more than the metadata declared.
                    if data.len() > self.bytes_remaining {
                        return Err(Error::TooMuchData);
                    }
                    self.bytes_remaining -= data.len();
                    writer.write_all(&data)?;
                    ack(ctx);
                    if self.bytes_remaining == 0 {
                        return Ok(true);
                    }
                } else {
                    // Binary data before metadata is a protocol violation.
                    return Err(Error::UnexpectedMessageType);
                }
            }
            ws::Message::Close(reason) => {
                // A close before all declared bytes arrived is an error;
                // after that it's a normal end of upload.
                if self.bytes_remaining > 0 {
                    return Err(Error::ClosedEarly(reason));
                } else {
                    return Ok(true);
                }
            }
            ws::Message::Ping(ping) => {
                debug!("Ping received, ponging");
                ctx.pong(&ping);
            }
            ws::Message::Nop | ws::Message::Pong(_) => (),
            _ => {
                return Err(Error::UnexpectedMessageType);
            }
        }
        Ok(false)
    }
}
|
329
src/zip.rs
Normal file
329
src/zip.rs
Normal file
|
@ -0,0 +1,329 @@
|
||||||
|
use std::io::Write;
|
||||||
|
use std::task::Waker;
|
||||||
|
|
||||||
|
use crc32fast::Hasher;
|
||||||
|
use log::debug;
|
||||||
|
use time::OffsetDateTime;
|
||||||
|
|
||||||
|
use crate::UploadedFile;
|
||||||
|
use crate::file::LiveWriter;
|
||||||
|
|
||||||
|
// Sizes in bytes of the fixed portions of the zip structures emitted
// below; these let the total archive size be computed before any data
// is written (entries are stored uncompressed).
const SIGNATURE_SIZE: usize = 4;
// "Version needed to extract" through "extra field length" — the fields
// shared by local and central-directory headers (shared_header_fields).
const SHARED_FIELDS_SIZE: usize = 26;
// ZIP64 extended info (4 header + 28 data) plus extended timestamp (4 + 5).
const EXTRA_FIELD_SIZE: usize = 41;
const LOCAL_HEADER_SIZE_MINUS_FILENAME: usize = SIGNATURE_SIZE + SHARED_FIELDS_SIZE + EXTRA_FIELD_SIZE;
// Data descriptor with signature and 8-byte (ZIP64) size fields.
const DATA_DESCRIPTOR_SIZE: usize = 24;
const FILE_ENTRY_SIZE_MINUS_FILENAME_AND_FILE: usize = LOCAL_HEADER_SIZE_MINUS_FILENAME + DATA_DESCRIPTOR_SIZE;

// Central directory header: signature + "version made by" (2) + shared
// fields + 14 bytes of comment/disk/attribute/offset fields + extra field.
const CENTRAL_DIRECTORY_HEADER_SIZE_MINUS_FILENAME: usize = SIGNATURE_SIZE + 2 + SHARED_FIELDS_SIZE + 14 + EXTRA_FIELD_SIZE;

// Fixed-size trailer records (see end_of_central_directory).
const EOCD64_RECORD_SIZE: usize = 56;
const EOCD64_LOCATOR_SIZE: usize = 20;
const EOCD_RECORD_SIZE: usize = 22;
const EOCD_TOTAL_SIZE: usize = EOCD64_RECORD_SIZE + EOCD64_LOCATOR_SIZE + EOCD_RECORD_SIZE;

// CRC-32 of the empty string, used for zero-length file entries.
const EMPTY_STRING_CRC32: u32 = 0;
|
||||||
|
|
||||||
|
/// Bytes one file contributes to the archive body: local header +
/// filename + (uncompressed) file data + data descriptor.
fn file_entry_size(file: &UploadedFile) -> usize {
    FILE_ENTRY_SIZE_MINUS_FILENAME_AND_FILE + file.name.len() + file.size
}
|
||||||
|
|
||||||
|
fn file_entries_size(files: &[UploadedFile]) -> usize {
|
||||||
|
let mut total = 0;
|
||||||
|
for file in files.iter() {
|
||||||
|
total += file_entry_size(file)
|
||||||
|
}
|
||||||
|
total
|
||||||
|
}
|
||||||
|
|
||||||
|
fn central_directory_size(files: &[UploadedFile]) -> usize {
|
||||||
|
let mut total = 0;
|
||||||
|
for file in files.iter() {
|
||||||
|
total += CENTRAL_DIRECTORY_HEADER_SIZE_MINUS_FILENAME + file.name.len();
|
||||||
|
}
|
||||||
|
total
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Exact size in bytes of the complete archive for the given file list:
/// entries + central directory + end-of-central-directory records.
fn zipfile_size(files: &[UploadedFile]) -> usize {
    file_entries_size(files) + central_directory_size(files) + EOCD_TOTAL_SIZE
}
|
||||||
|
|
||||||
|
/// Pack a timestamp into the 32-bit MS-DOS (FAT) date/time format used
/// in zip headers: year stored relative to 1980, seconds stored at
/// 2-second resolution (hence the `>> 1`).
fn fat_timestamp(time: OffsetDateTime) -> u32 {
    (((time.year() - 1980) as u32) << 25)
        | ((time.month() as u32) << 21)
        | ((time.day() as u32) << 16)
        | ((time.hour() as u32) << 11)
        | ((time.minute() as u32) << 5)
        | ((time.second() as u32) >> 1)
}
|
||||||
|
|
||||||
|
/// Append the low `len` bytes of `value` to `data` in little-endian
/// order; if `len` exceeds 8, the extra high bytes are zero.
fn append_value(data: &mut Vec<u8>, mut value: u64, len: usize) {
    data.reserve(len);
    for _ in 0..len {
        data.push(value as u8);
        value >>= 8;
    }
}
|
||||||
|
|
||||||
|
/// Append `count` copies of `byte` to `data`.
fn append_repeated_byte(data: &mut Vec<u8>, byte: u8, count: usize) {
    data.extend(std::iter::repeat(byte).take(count));
}
|
||||||
|
|
||||||
|
/// Append `count` zero bytes (used for reserved/zero fields).
fn append_0(data: &mut Vec<u8>, count: usize) {
    append_repeated_byte(data, 0, count);
}
|
||||||
|
|
||||||
|
/// Append `count` 0xff bytes (ZIP64 "see extended field" placeholders).
fn append_ff(data: &mut Vec<u8>, count: usize) {
    append_repeated_byte(data, 0xff, count);
}
|
||||||
|
|
||||||
|
impl UploadedFile {
    /// Returns the fields shared by the ZIP local file header and
    /// central directory file header - "Version needed to extract"
    /// through "Extra field length". Must total SHARED_FIELDS_SIZE bytes.
    fn shared_header_fields(&self, hash: Option<u32>) -> Vec<u8> {
        let mut fields = vec![
            45, 0, // Minimum version required to extract: 4.5 for ZIP64 extensions
            0b00001000, 0, // General purpose bit flag: size and CRC-32 in data descriptor
            0, 0, // Compression method: none
        ];
        append_value(&mut fields, fat_timestamp(self.modtime) as u64, 4);
        // Use 0s as a placeholder if the CRC-32 hash isn't known yet
        append_value(&mut fields, hash.unwrap_or(0) as u64, 4);
        // Placeholders for compressed and uncompressed size in ZIP64 record, 4 bytes each
        append_ff(&mut fields, 8);
        // Filename length (2 bytes)
        append_value(&mut fields, self.name.len() as u64, 2);
        // Extra field length: 32 bytes for zip64, 9 bytes for timestamp
        fields.append(&mut vec![41, 0]);
        fields
    }

    /// Extra-field blob shared by both header types: ZIP64 extended
    /// information (real 64-bit sizes and local-header offset) followed
    /// by the Unix extended-timestamp field. EXTRA_FIELD_SIZE bytes.
    fn extra_field(&self, local_header_offset: usize) -> Vec<u8> {
        let mut field = vec![
            0x01, 0x00, // Zip64 extended information
            28, 0, // 28 bytes of data
        ];
        // Original size and compressed size - if this is in the local
        // header, we're supposed to leave these blank and point to
        // the data descriptor, but I'm assuming it won't hurt to fill
        // them in regardless
        append_value(&mut field, self.size as u64, 8);
        append_value(&mut field, self.size as u64, 8);
        append_value(&mut field, local_header_offset as u64, 8);
        append_0(&mut field, 4); // File starts on disk 0, there's no other disk

        field.append(&mut vec![
            0x55, 0x54, // Extended timestamp
            5, 0, // 5 bytes of data
            0b00000001, // Flags: Only modification time is present
        ]);
        append_value(&mut field, self.modtime.unix_timestamp() as u64, 4);

        field
    }

    /// Local file header placed immediately before this file's data.
    /// The CRC-32 is unknown while streaming, so it is left as zeros
    /// (the data descriptor after the file data carries the real value).
    fn local_header(&self, local_header_offset: usize) -> Vec<u8> {
        let mut header = vec![0x50, 0x4b, 0x03, 0x04]; // Local file header signature
        header.append(&mut self.shared_header_fields(None));
        header.append(&mut self.name.clone().into_bytes());
        header.append(&mut self.extra_field(local_header_offset));
        header
    }

    /// Central directory header for this file, written in the archive
    /// trailer once the CRC-32 is known.
    fn central_directory_header(&self, local_header_offset: usize, hash: u32) -> Vec<u8> {
        let mut header = vec![
            0x50, 0x4b, 0x01, 0x02, // Central directory file header signature
            45, 3, // Made by a Unix system supporting version 4.5
        ];
        header.append(&mut self.shared_header_fields(Some(hash)));
        header.append(&mut vec![
            0, 0, // File comment length: 0
            0, 0, // Disk number where file starts: 0
            0, 0, // Internal file attributes: nothing
            0, 0, 0, 0, // External file attributes: nothing
            0xff, 0xff, 0xff, 0xff, // Relative offset of local file header: placeholder, see ZIP64 data
        ]);
        header.append(&mut self.name.clone().into_bytes());
        header.append(&mut self.extra_field(local_header_offset));
        header
    }

    /// Data descriptor written after the file data, carrying the real
    /// CRC-32 and (ZIP64 8-byte) sizes. DATA_DESCRIPTOR_SIZE bytes.
    fn data_descriptor(&self, hash: u32) -> Vec<u8> {
        let mut descriptor = vec![0x50, 0x4b, 0x07, 0x08]; // Data descriptor signature
        append_value(&mut descriptor, hash as u64, 4);
        // Compressed and uncompressed sizes
        append_value(&mut descriptor, self.size as u64, 8);
        append_value(&mut descriptor, self.size as u64, 8);
        descriptor
    }
}
|
||||||
|
|
||||||
|
/// Build the archive trailer: ZIP64 end-of-central-directory record,
/// then the ZIP64 EOCD locator, then the classic EOCD record whose
/// 0xff placeholder fields direct readers to the ZIP64 data.
/// Must total EOCD_TOTAL_SIZE bytes.
fn end_of_central_directory(files: &[UploadedFile]) -> Vec<u8> {
    let entries_size = file_entries_size(files) as u64;
    let directory_size = central_directory_size(files) as u64;

    let mut eocd = vec![
        0x50, 0x4b, 0x06, 0x06, // EOCD64 record signature
        44, // Size of remaining EOCD64 record
    ];
    append_0(&mut eocd, 7); // pad out the rest of the size field
    eocd.append(&mut vec![
        45, 3, // Made by a Unix system supporting version 4.5
        45, 0, // Minimum version 4.5 to extract
    ]);
    append_0(&mut eocd, 8); // Two 4-byte disk numbers, both 0
    // Number of central directory records, on this disk and in total
    append_value(&mut eocd, files.len() as u64, 8);
    append_value(&mut eocd, files.len() as u64, 8);
    append_value(&mut eocd, directory_size, 8);
    append_value(&mut eocd, entries_size, 8); // Offset of start of central directory

    eocd.append(&mut vec![0x50, 0x4b, 0x06, 0x07]); // EOCD64 locator signature
    append_0(&mut eocd, 4); // disk number
    append_value(&mut eocd, entries_size + directory_size, 8); // EOCD64 record offset
    append_0(&mut eocd, 4); // total number of disks;

    eocd.append(&mut vec![0x50, 0x4b, 0x05, 0x06]); // EOCD record signature
    append_ff(&mut eocd, 16); // Zip64 placeholders for disk numbers, record counts, and offsets
    append_0(&mut eocd, 2); // Comment length: 0

    eocd
}
|
||||||
|
|
||||||
|
/// Streaming zipfile builder: raw file bytes go in through `Write`
/// while headers, data descriptors, and the central directory are
/// generated on the fly. Entries are stored uncompressed with ZIP64
/// extensions, so the archive size is known in advance (total_size).
pub struct ZipGenerator<'a> {
    files: Vec<UploadedFile>,
    // Index of the entry currently being written.
    file_index: usize,
    // Bytes of the current entry's data written so far.
    byte_index: usize,
    // Queued header/descriptor/directory bytes not yet written to output;
    // always drained before new file data is accepted.
    pending_metadata: Vec<u8>,
    // Running CRC-32 of the current entry's data.
    hasher: Hasher,
    // Finalized CRC-32 of each completed entry, in file order.
    hashes: Vec<u32>,
    output: Box<dyn LiveWriter + 'a>
}
|
||||||
|
|
||||||
|
impl<'a> ZipGenerator<'a> {
    /// Wrap `output` in a zip generator for `files`. The first local
    /// header (and complete entries for any leading empty files) is
    /// queued immediately.
    pub fn new(files: Vec<UploadedFile>, output: Box<dyn LiveWriter + 'a>) -> Self {
        let mut result = Self {
            files,
            file_index: 0,
            byte_index: 0,
            pending_metadata: vec![],
            hasher: Hasher::new(),
            hashes: vec![],
            output,
        };
        result.start_new_file();
        result
    }

    /// Exact size in bytes of the finished archive; computable up front
    /// because entries are stored uncompressed.
    pub fn total_size(&self) -> usize {
        zipfile_size(&self.files)
    }

    /// Current file's data is complete: finalize and record its CRC-32,
    /// queue its data descriptor, then advance to the next entry.
    fn finish_file(&mut self) {
        // Swap in a fresh hasher for the next file while finalizing this one.
        let hash = std::mem::replace(&mut self.hasher, Hasher::new()).finalize();
        self.hashes.push(hash);
        self.pending_metadata.append(&mut self.files[self.file_index].data_descriptor(hash));
        debug!("Finishing file entry in zipfile: {}, hash {}", self.files[self.file_index].name, hash);
        self.file_index += 1;
        self.start_new_file();
    }

    /// Queue the local header for the next entry with data, first
    /// emitting complete header+descriptor pairs for any zero-length
    /// files in between; finishes the archive when no files remain.
    fn start_new_file(&mut self) {
        // Byte offset of the next entry = size of all entries before it.
        let mut offset = file_entries_size(&self.files[..self.file_index]);
        while self.file_index < self.files.len() && self.files[self.file_index].size == 0 {
            debug!("Empty file entry in zipfile: {}", self.files[self.file_index].name);
            self.hashes.push(EMPTY_STRING_CRC32);
            let mut local_header = self.files[self.file_index].local_header(offset);
            let mut data_descriptor = self.files[self.file_index].data_descriptor(EMPTY_STRING_CRC32);
            offset += local_header.len() + data_descriptor.len();
            self.file_index += 1;
            self.pending_metadata.append(&mut local_header);
            self.pending_metadata.append(&mut data_descriptor);
        }
        if self.file_index < self.files.len() {
            debug!("Starting file entry in zipfile: {}", self.files[self.file_index].name);
            self.byte_index = 0;
            self.pending_metadata.append(&mut self.files[self.file_index].local_header(offset));
        } else {
            self.finish_zipfile();
        }
    }

    /// Queue the central directory (one header per file, with the
    /// recorded CRC-32s) followed by the end-of-central-directory records.
    fn finish_zipfile(&mut self) {
        debug!("Writing zipfile central directory");
        let mut offset = 0;
        for (i, file) in self.files.iter().enumerate() {
            debug!("Writing central directory entry: {}, hash {}", file.name, self.hashes[i]);
            self.pending_metadata.append(&mut file.central_directory_header(offset, self.hashes[i]));
            offset += file_entry_size(file);
        }
        debug!("Writing end of central directory");
        self.pending_metadata.append(&mut end_of_central_directory(&self.files));
    }
}
|
||||||
|
|
||||||
|
// Forward waker registration straight to the underlying writer;
// ZipGenerator itself adds no buffering that needs separate waking.
impl<'a> LiveWriter for ZipGenerator<'a> {
    fn add_waker(&mut self, waker: Waker) {
        self.output.add_waker(waker);
    }
}
|
||||||
|
|
||||||
|
impl<'a> Write for ZipGenerator<'a> {
    // Accepts raw bytes of the current file entry. Any queued metadata
    // (headers/descriptors/directory) is drained to the underlying
    // writer before new file data is accepted; input beyond the current
    // entry's declared size is truncated, and a short write that exactly
    // completes the entry triggers the next entry's metadata.
    fn write(&mut self, mut buf: &[u8]) -> std::io::Result<usize> {
        while !self.pending_metadata.is_empty() {
            let result = self.output.write(self.pending_metadata.as_slice());
            match result {
                // NOTE(review): this propagates the metadata write's
                // result (Ok(0) or Err) even though no bytes of `buf`
                // were consumed — confirm callers treat Ok(0) as
                // "no progress, retry" rather than end-of-stream here.
                Ok(0) | Err(_) => { return result; }
                Ok(n) => { self.pending_metadata.drain(..n); }
            }
        }
        // All declared entries written: the archive is complete and no
        // further data is accepted.
        if self.file_index >= self.files.len() {
            return Ok(0);
        }
        // Never write past the declared size of the current entry.
        let bytes_remaining = self.files[self.file_index].size - self.byte_index;
        if bytes_remaining < buf.len() {
            buf = &buf[..bytes_remaining];
        }
        let result = self.output.write(buf);
        match result {
            Ok(0) | Err(_) => (),
            Ok(n) => {
                // Only the bytes actually written count toward the CRC
                // and the entry's progress.
                self.hasher.update(&buf[..n]);
                self.byte_index += n;
                if n == bytes_remaining {
                    self.finish_file();
                }
            }
        }
        result
    }

    // Pushes any queued metadata out before flushing, so a final flush
    // after the last data write emits the central directory and trailer.
    fn flush(&mut self) -> std::io::Result<()> {
        debug!("Flushing zipfile writer");
        if !self.pending_metadata.is_empty() {
            self.output.write_all(self.pending_metadata.as_slice())?;
            self.pending_metadata.clear();
        }
        self.output.flush()
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    /// An archive generated from zero files must still be a valid, empty
    /// zipfile accepted by both the streaming and random-access readers.
    // Fix: this function was missing #[test], so it was never run by
    // `cargo test` (and was dead code in test builds).
    #[test]
    fn test_no_files() {
        let mut output: Vec<u8> = vec![];
        {
            let mut zipgen = ZipGenerator::new(vec![], Box::new(std::io::Cursor::new(&mut output)));
            zipgen.write_all(&[]).unwrap();
            zipgen.flush().unwrap();
        }

        eprintln!("{:?}", &output);
        {
            let mut reader = std::io::BufReader::new(output.as_slice());
            let zipfile = zip::read::read_zipfile_from_stream(&mut reader).unwrap();
            assert!(zipfile.is_none());
        }
        let archive = zip::ZipArchive::new(std::io::Cursor::new(output)).unwrap();
        assert!(archive.is_empty());
    }
}
|
22
static/index.html
Normal file
22
static/index.html
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<link rel="stylesheet" type="text/css" href="transbeam.css"/>
<title>Upload Test</title>
</head>
<body>
<!-- The real file input is hidden (see transbeam.css); the styled span
     inside the label acts as the visible picker button. -->
<div>
<label>
<span class="fake_button" id="file_input_message">Select files to upload...</span>
<input type="file" multiple id="file_input"/>
</label>
</div>
<button id="upload" disabled>Upload</button>
<h2>Files selected:</h2>
<!-- Populated by upload.js with one <li> per selected file. -->
<ul id="file_list">
</ul>
<script src="upload.js"></script>
</body>
</html>
|
24
static/transbeam.css
Normal file
24
static/transbeam.css
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
/* The real file input is hidden; the .fake_button label span in
   index.html triggers it instead. */
input[type="file"] {
    display: none;
}

/* Shared appearance for real buttons and the label styled as one. */
button, .fake_button {
    font-size: 18px;
    font-family: sans-serif;
    color: #000;
    background-color: #ccc;
    border: 1px solid #bbb;
    border-radius: 4px;
    padding: 6px 12px;
    cursor: pointer;
}

button:hover, .fake_button:hover {
    background-color: #aaa;
}

/* Greyed-out state: no files selected, or upload in progress. */
button:disabled, button:disabled:hover {
    color: #aaa;
    background-color: #eee;
    border-color: #ddd;
}
|
105
static/upload.js
Normal file
105
static/upload.js
Normal file
|
@ -0,0 +1,105 @@
|
||||||
|
// Queue of File objects selected by the user, in upload order.
let files = [];

// WebSocket to the /upload endpoint; created when the upload starts.
let socket = null;
// Index into `files` of the file currently being sent.
let fileIndex = 0;
// Offset within the current file of the next unsent byte.
let byteIndex = 0;

// Send the server one JSON text frame listing every selected file's
// name, size, and modification time; the server replies with 'ack'.
function sendMetadata() {
    const metadata = files.map((file) => ({
        name: file.name,
        size: file.size,
        modtime: file.lastModified, // milliseconds since the Unix epoch
    }));
    socket.send(JSON.stringify(metadata));
}
|
||||||
|
|
||||||
|
// All data has been handed to the socket; poll once a second until the
// socket's buffer drains, then close the connection and notify the user.
function finishSending() {
    if (socket.bufferedAmount === 0) {
        socket.close();
        alert("done");
        return;
    }
    window.setTimeout(finishSending, 1000);
}
|
||||||
|
|
||||||
|
// Send the next chunk of file data. Called once per server 'ack', so at
// most one 8 KiB chunk is awaiting acknowledgement at any time.
function sendData() {
    if (fileIndex >= files.length) {
        finishSending();
        // Fix: without this return, execution fell through and read
        // `files[fileIndex]` (undefined), throwing a TypeError below.
        return;
    }
    const currentFile = files[fileIndex];
    if (byteIndex < currentFile.size) {
        // Send the next slice (up to 8 KiB) of the current file.
        const endpoint = Math.min(byteIndex + 8192, currentFile.size);
        const data = currentFile.slice(byteIndex, endpoint);
        socket.send(data);
        byteIndex = endpoint;
    } else {
        // Current file exhausted (also covers zero-length files);
        // advance and immediately try the next one.
        fileIndex += 1;
        byteIndex = 0;
        sendData();
    }
}
|
||||||
|
|
||||||
|
// Cached references to the page's static UI elements (see index.html).
const fileInput = document.getElementById('file_input');
const fileInputMessage = document.getElementById('file_input_message');
const fileList = document.getElementById('file_list');
const uploadButton = document.getElementById('upload');

// Enable/disable the upload button and adjust the picker label text to
// reflect whether any files are currently queued.
function updateButtons() {
    if (files.length === 0) {
        uploadButton.disabled = true;
        fileInputMessage.textContent = 'Select files to upload...';
    } else {
        uploadButton.disabled = false;
        fileInputMessage.textContent = 'Select more files to upload...';
    }
}

// Set the initial UI state on page load.
updateButtons();
|
||||||
|
|
||||||
|
// Add a newly picked file to the queue and render a list entry for it
// with a delete button. A file whose name is already queued is ignored
// (names must be unique; the server also dedupes server-side).
function addFile(newFile) {
    if (files.some((oldFile) => newFile.name === oldFile.name)) { return; }

    files.push(newFile);

    const listEntry = document.createElement('li');
    const deleteButton = document.createElement('button');
    deleteButton.textContent = 'x';
    deleteButton.addEventListener('click', () => {
        removeFile(newFile.name);
        listEntry.remove();
        updateButtons();
    });
    const entryName = document.createElement('span');
    entryName.textContent = newFile.name;
    listEntry.appendChild(deleteButton);
    listEntry.appendChild(entryName);

    fileList.appendChild(listEntry);
}
|
||||||
|
|
||||||
|
// Drop the queued file with the given name; the corresponding DOM list
// entry is removed by the caller.
function removeFile(name) {
    files = files.filter(function (kept) {
        return kept.name !== name;
    });
}
|
||||||
|
|
||||||
|
// Queue every file chosen in the picker, then clear the input's value so
// re-picking the same file still fires another 'input' event.
fileInput.addEventListener('input', (e) => {
    for (const file of e.target.files) { addFile(file); }
    updateButtons();
    e.target.value = '';
});

// Lock the UI, open the websocket, and start the upload: metadata goes
// out on 'open', and each server 'ack' triggers the next data chunk.
uploadButton.addEventListener('click', (e) => {
    if (files.length === 0) { return; }

    fileInput.disabled = true;
    for (const button of document.getElementsByTagName('button')) {
        button.disabled = true;
    }

    socket = new WebSocket('ws://localhost:3000/upload');
    socket.addEventListener('open', sendMetadata);
    socket.addEventListener('message', (msg) => {
        if (msg.data === 'ack') {
            sendData();
        }
    });
})
|
Loading…
Reference in a new issue