Remove into-file option + more refactoring

Daniel M 2022-03-31 17:57:02 +02:00
parent 88f23ae568
commit c9ac3dd683
3 changed files with 48 additions and 60 deletions

View File

@@ -18,14 +18,6 @@ pub struct CLIArgs {
     )]
     pub outdir: PathBuf,
-    #[clap(
-        short = 'i',
-        long = "into-file",
-        value_name = "FILENAME",
-        help = "Force filename. This only works for single file downloads",
-    )]
-    pub into_file: Option<PathBuf>,
     #[clap(
         short = 'n',
         long = "num-files",

View File

@@ -95,10 +95,7 @@ pub async fn download_feedback_chunks(
 ) -> Result<()> {
     let mut content_length = match content_length {
         Some(it) => it,
-        None => {
-            let (content_length, _) = http_get_filesize_and_range_support(url).await?;
-            content_length
-        }
+        None => http_get_filesize_and_range_support(url).await?.filesize,
     };

     // Send the HTTP request to download the given link
@@ -226,7 +223,7 @@ pub async fn download_feedback_multi(
 ) -> Result<()> {
     let content_length = match content_length {
         Some(it) => it,
-        None => http_get_filesize_and_range_support(url).await?.0,
+        None => http_get_filesize_and_range_support(url).await?.filesize,
     };

     // Create zeroed file with 1 byte too much. This will be truncated on download
@@ -379,40 +376,45 @@ pub async fn download_feedback_multi(
 async fn create_zeroed_file(file: &Path, filesize: usize) -> Result<()> {
     let ofile = tokio::fs::OpenOptions::new()
         .create(true)
-        // Open in write mode
         .write(true)
-        // Delete and overwrite the file
         .truncate(true)
         .open(file)
         .await?;
     ofile.set_len(filesize as u64).await?;
     Ok(())
 }

-pub async fn http_get_filesize_and_range_support(url: &str) -> Result<(u64, bool)> {
+pub struct HttpFileInfo {
+    pub filesize: u64,
+    pub range_support: bool,
+    pub filename: String,
+}
+
+pub async fn http_get_filesize_and_range_support(url: &str) -> Result<HttpFileInfo> {
     let resp = reqwest::Client::new().head(url).send().await?;
-    if let Some(filesize) = resp.headers().get(reqwest::header::CONTENT_LENGTH) {
-        if let Ok(val_str) = filesize.to_str() {
-            if let Ok(val) = val_str.parse::<u64>() {
-                let mut range_supported = false;
-
-                if let Some(range) = resp.headers().get(reqwest::header::ACCEPT_RANGES) {
-                    if let Ok(range) = range.to_str() {
-                        if range == "bytes" {
-                            range_supported = true;
-                        }
-                    }
-                }
-
-                return Ok((val, range_supported));
-            }
-        }
-    }
-
-    Err(DlError::ContentLengthUnknown.into())
+    let filesize = resp
+        .headers()
+        .get(reqwest::header::CONTENT_LENGTH)
+        .and_then(|it| it.to_str().unwrap().parse::<u64>().ok())
+        .ok_or(DlError::ContentLengthUnknown)?;
+
+    let range = resp
+        .headers()
+        .get(reqwest::header::ACCEPT_RANGES)
+        .and_then(|it| it.to_str().ok());
+    let range_support = matches!(range, Some("bytes"));
+
+    let filename = url_to_filename(url);
+
+    let info = HttpFileInfo {
+        filesize,
+        range_support,
+        filename,
+    };
+    Ok(info)
 }

 #[cfg(test)]
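
For reference, a minimal sketch of how the refactored API reads at a call site (hypothetical URL and filename; assumes this crate's download module, anyhow, and a tokio runtime, as used elsewhere in this commit):

use anyhow::Result;

#[tokio::main]
async fn main() -> Result<()> {
    // A single HEAD request now yields all three pieces of metadata at once,
    // instead of the old (u64, bool) tuple plus a separate filename lookup.
    let info =
        download::http_get_filesize_and_range_support("https://example.com/file.bin").await?;
    println!(
        "{}: {} bytes (range support: {})",
        info.filename, info.filesize, info.range_support
    );
    Ok(())
}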

View File

@@ -7,7 +7,6 @@ use std::{
 };

 use clap::Parser;
-use download::{download_feedback, download_feedback_multi, http_get_filesize_and_range_support};
 use futures::future::join_all;
 use tokio::{
     fs::create_dir_all,
@@ -16,11 +15,12 @@ use tokio::{
         Mutex,
     },
 };
-use zippy::is_zippyshare_url;

 use crate::{
     args::CLIArgs,
     dlreport::{watch_and_print_reports, DlReport, DlReporter, DlStatus},
+    download::{download_feedback, download_feedback_multi, http_get_filesize_and_range_support},
+    zippy::is_zippyshare_url,
 };

 use anyhow::Result;
@@ -134,71 +134,65 @@ async fn download_job(urls: SyncQueue, reporter: UnboundedSender<DlReport>, cli_
             dlreq.url.to_string()
         };

-        let file_name = cli_args
-            .into_file
-            .clone()
-            .unwrap_or_else(|| download::url_to_filename(&url).into());
-
-        let into_file: PathBuf = cli_args
-            .outdir
-            .join(Path::new(&file_name))
-            .to_str()
-            .unwrap()
-            .to_string()
-            .into();
-
-        let (filesize, range_supported) = match http_get_filesize_and_range_support(&url).await {
-            Ok((filesize, range_supported)) => (filesize, range_supported),
+        let info = match http_get_filesize_and_range_support(&url).await {
+            Ok(it) => it,
             Err(_e) => {
                 reporter.send(DlStatus::Message(format!(
-                    "Error while querying metadata: {}",
-                    url
+                    "Error while querying metadata: {url}"
                 )));
                 continue;
             }
         };

+        let into_file: PathBuf = cli_args
+            .outdir
+            .join(Path::new(&info.filename))
+            .to_str()
+            .unwrap()
+            .to_string()
+            .into();
+
         // If file with same name is present locally, check filesize
         if into_file.exists() {
             let local_filesize = std::fs::metadata(&into_file).unwrap().len();
-            if filesize == local_filesize {
+            if info.filesize == local_filesize {
                 reporter.send(DlStatus::Message(format!(
                     "Skipping file '{}': already present",
-                    file_name.display()
+                    info.filename
                 )));
                 reporter.send(DlStatus::Skipped);
                 continue;
             } else {
                 reporter.send(DlStatus::Message(format!(
                     "Replacing file '{}': present but not completed",
-                    &file_name.display()
+                    &info.filename
                 )));
             }
         }

         let dl_status = if cli_args.conn_count.get() == 1 {
-            download_feedback(&url, &into_file, reporter.clone(), Some(filesize)).await
-        } else if !range_supported {
+            download_feedback(&url, &into_file, reporter.clone(), Some(info.filesize)).await
+        } else if !info.range_support {
             reporter.send(DlStatus::Message(format!(
                 "Server does not support range headers. Downloading with single connection: {}",
                 url
             )));
-            download_feedback(&url, &into_file, reporter.clone(), Some(filesize)).await
+            download_feedback(&url, &into_file, reporter.clone(), Some(info.filesize)).await
         } else {
             download_feedback_multi(
                 &url,
                 &into_file,
                 reporter.clone(),
                 cli_args.conn_count.get(),
-                Some(filesize),
+                Some(info.filesize),
             )
             .await
         };

        if dl_status.is_err() {
            reporter.send(DlStatus::DoneErr {
-                filename: file_name.to_str().unwrap().to_string(),
+                filename: info.filename,
            });
        }
    }