Mirror of https://github.com/cargo-bins/cargo-binstall.git, synced 2025-04-24 22:30:03 +00:00
Refactor: Extract new crate binstalk-{signal, downloader} (#518)
* Refactor: Extract new crate binstalk-downloader
* Re-export `PkgFmt` from `binstalk_manifests`
* Update release-pr.yml
* Update dependabot

Signed-off-by: Jiahao XU <Jiahao_XU@outlook.com>
Parent: 3841762a5b
Commit: 89fa5b1769
21 changed files with 456 additions and 260 deletions
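
Beyond moving the download and remote helpers into the new binstalk-downloader crate, the commit threads an optional cancellation future through the downloader API; the fetcher hunks below show the new call shape. Below is a minimal sketch of that pattern, assuming an in-crate call site with the same helper paths the fetchers use (the function itself, its name, and the hard-coded `PkgFmt::Tgz` are illustrative, not part of the commit):

```rust
use std::path::Path;

use crate::{
    errors::BinstallError,
    helpers::{
        download::Download,
        remote::{Client, Url},
        signal::wait_on_cancellation_signal,
    },
    manifests::cargo_toml_binstall::PkgFmt,
};

// Hypothetical helper mirroring the fetchers in this commit: the new third
// argument to `and_extract` is an optional cancellation future, so the
// blocking extraction bails out when Ctrl-C / SIGTERM is received.
async fn fetch_and_unpack(client: Client, url: Url, dst: &Path) -> Result<(), BinstallError> {
    Ok(Download::new(client, url)
        .and_extract(PkgFmt::Tgz, dst, Some(Box::pin(wait_on_cancellation_signal())))
        .await?)
}
```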

@@ -11,79 +11,43 @@ license = "GPL-3.0"

[dependencies]
async-trait = "0.1.58"
binstalk-downloader = { version = "0.1.0", path = "../binstalk-downloader" }
binstalk-manifests = { version = "0.1.0", path = "../binstalk-manifests" }
bytes = "1.2.1"
bzip2 = "0.4.3"
cargo_toml = "0.13.0"
compact_str = { version = "0.6.0", features = ["serde"] }
crates_io_api = { version = "0.8.1", default-features = false }
detect-targets = { version = "0.1.2", path = "../detect-targets" }
digest = "0.10.5"
flate2 = { version = "1.0.24", default-features = false }
futures-util = { version = "0.3.25", default-features = false, features = ["std"] }
generic-array = "0.14.6"
home = "0.5.4"
httpdate = "1.0.2"
itertools = "0.10.5"
jobslot = { version = "0.2.6", features = ["tokio"] }
log = { version = "0.4.17", features = ["std"] }
miette = "5.4.1"
normalize-path = { version = "0.2.0", path = "../normalize-path" }
once_cell = "1.16.0"
reqwest = { version = "0.11.12", features = ["stream", "gzip", "brotli", "deflate"], default-features = false }
scopeguard = "1.1.0"
semver = { version = "1.0.14", features = ["serde"] }
serde = { version = "1.0.147", features = ["derive"] }
strum = "0.24.1"
# Use a fork here since we need PAX support, but the upstream
# does not have the PR merged yet.
#
#tar = "0.4.38"
tar = { package = "binstall-tar", version = "0.4.39" }
tempfile = "3.3.0"
thiserror = "1.0.37"
tinytemplate = "1.2.1"
# parking_lot - for OnceCell::const_new
tokio = { version = "1.21.2", features = ["macros", "rt", "process", "sync", "signal", "time", "parking_lot"], default-features = false }
tower = { version = "0.4.13", features = ["limit", "util"] }
trust-dns-resolver = { version = "0.21.2", optional = true, default-features = false, features = ["dnssec-ring"] }
# parking_lot for `tokio::sync::OnceCell::const_new`
tokio = { version = "1.21.2", features = ["rt", "process", "sync", "signal", "parking_lot"], default-features = false }
url = { version = "2.3.1", features = ["serde"] }
xz2 = "0.1.7"

# Disable all features of zip except for features of compression algorithms:
# Disabled features include:
# - aes-crypto: Enables decryption of files which were encrypted with AES, absolutely zero use for
#   this crate.
# - time: Enables features using the [time](https://github.com/time-rs/time) crate,
#   which is not used by this crate.
zip = { version = "0.6.3", default-features = false, features = ["deflate", "bzip2", "zstd"] }

# zstd is also depended on by zip.
# Since zip 0.6.3 depends on zstd 0.11, we also have to use 0.11 here,
# otherwise there will be a link conflict.
zstd = { version = "0.11.2", default-features = false }

[dev-dependencies]
env_logger = "0.9.3"

[features]
default = ["static", "rustls"]

static = ["bzip2/static", "xz2/static"]
pkg-config = ["zstd/pkg-config"]
static = ["binstalk-downloader/static"]
pkg-config = ["binstalk-downloader/pkg-config"]

zlib-ng = ["flate2/zlib-ng"]
zlib-ng = ["binstalk-downloader/zlib-ng"]

rustls = [
    "crates_io_api/rustls",
    "reqwest/rustls-tls",
rustls = ["crates_io_api/rustls", "binstalk-downloader/rustls"]
native-tls = ["binstalk-downloader/native-tls"]

    # Enable the following features only if trust-dns-resolver is enabled.
    "trust-dns-resolver?/dns-over-rustls",
    # trust-dns-resolver currently supports https with rustls
    "trust-dns-resolver?/dns-over-https-rustls",
]
native-tls = ["reqwest/native-tls", "trust-dns-resolver?/dns-over-native-tls"]

# Enable trust-dns-resolver so that features on it will also be enabled.
trust-dns = ["trust-dns-resolver", "reqwest/trust-dns"]
trust-dns = ["binstalk-downloader/trust-dns"]

@@ -10,6 +10,7 @@ use crate::{
    helpers::{
        download::Download,
        remote::{Client, Url},
        signal::wait_on_cancellation_signal,
    },
    manifests::cargo_toml_binstall::{Meta, TarBasedFmt},
};

@@ -53,7 +54,11 @@ pub async fn fetch_crate_cratesio(
    let manifest_dir_path: PathBuf = format!("{name}-{version_name}").into();

    Download::new(client, Url::parse(&crate_url)?)
        .and_visit_tar(TarBasedFmt::Tgz, ManifestVisitor::new(manifest_dir_path))
        .await
    Ok(Download::new(client, Url::parse(&crate_url)?)
        .and_visit_tar(
            TarBasedFmt::Tgz,
            ManifestVisitor::new(manifest_dir_path),
            Some(Box::pin(wait_on_cancellation_signal())),
        )
        .await?)
}

@@ -1,16 +1,16 @@
use std::{
    io::Read,
    io::{self, Read},
    path::{Path, PathBuf},
};

use cargo_toml::Manifest;
use log::debug;
use normalize_path::NormalizePath;
use tar::Entries;

use super::vfs::Vfs;
use crate::{
    errors::BinstallError, helpers::download::TarEntriesVisitor,
    errors::BinstallError,
    helpers::download::{DownloadError, Entries, TarEntriesVisitor},
    manifests::cargo_toml_binstall::Meta,
};

@@ -37,7 +37,7 @@ impl ManifestVisitor {
impl TarEntriesVisitor for ManifestVisitor {
    type Target = Manifest<Meta>;

    fn visit<R: Read>(&mut self, entries: Entries<'_, R>) -> Result<(), BinstallError> {
    fn visit<R: Read>(&mut self, entries: Entries<'_, R>) -> Result<(), DownloadError> {
        for res in entries {
            let mut entry = res?;
            let path = entry.path()?;

@@ -71,16 +71,20 @@ impl TarEntriesVisitor for ManifestVisitor {
    }

    /// Load binstall metadata using the extracted information stored in memory.
    fn finish(self) -> Result<Self::Target, BinstallError> {
        debug!("Loading manifest directly from extracted file");

        // Load and parse manifest
        let mut manifest = Manifest::from_slice_with_metadata(&self.cargo_toml_content)?;

        // Checks vfs for binary output names
        manifest.complete_from_abstract_filesystem(&self.vfs)?;

        // Return metadata
        Ok(manifest)
    fn finish(self) -> Result<Self::Target, DownloadError> {
        Ok(load_manifest(&self.cargo_toml_content, &self.vfs).map_err(io::Error::from)?)
    }
}

fn load_manifest(slice: &[u8], vfs: &Vfs) -> Result<Manifest<Meta>, BinstallError> {
    debug!("Loading manifest directly from extracted file");

    // Load and parse manifest
    let mut manifest = Manifest::from_slice_with_metadata(slice)?;

    // Checks vfs for binary output names
    manifest.complete_from_abstract_filesystem(vfs)?;

    // Return metadata
    Ok(manifest)
}

@@ -4,6 +4,10 @@ use std::{
    process::{ExitCode, ExitStatus, Termination},
};

use binstalk_downloader::{
    download::{DownloadError, ZipError},
    remote::{Error as RemoteError, HttpError, ReqwestError},
};
use compact_str::CompactString;
use miette::{Diagnostic, Report};
use thiserror::Error;

@@ -47,7 +51,7 @@ pub enum BinstallError {
    /// - Exit: 66
    #[error(transparent)]
    #[diagnostic(severity(error), code(binstall::unzip))]
    Unzip(#[from] zip::result::ZipError),
    Unzip(#[from] ZipError),

    /// A rendering error in a template.
    ///

@@ -65,7 +69,7 @@ pub enum BinstallError {
    /// - Exit: 68
    #[error(transparent)]
    #[diagnostic(severity(error), code(binstall::reqwest))]
    Reqwest(#[from] reqwest::Error),
    Reqwest(#[from] ReqwestError),

    /// An HTTP request failed.
    ///

@@ -74,14 +78,9 @@ pub enum BinstallError {
    ///
    /// - Code: `binstall::http`
    /// - Exit: 69
    #[error("could not {method} {url}")]
    #[error(transparent)]
    #[diagnostic(severity(error), code(binstall::http))]
    Http {
        method: reqwest::Method,
        url: url::Url,
        #[source]
        err: reqwest::Error,
    },
    Http(#[from] HttpError),

    /// A subprocess failed.
    ///

@@ -418,3 +417,27 @@ impl From<BinstallError> for io::Error {
        }
    }
}

impl From<RemoteError> for BinstallError {
    fn from(e: RemoteError) -> Self {
        use RemoteError::*;

        match e {
            Reqwest(reqwest_error) => reqwest_error.into(),
            Http(http_error) => http_error.into(),
        }
    }
}

impl From<DownloadError> for BinstallError {
    fn from(e: DownloadError) -> Self {
        use DownloadError::*;

        match e {
            Unzip(zip_error) => zip_error.into(),
            Remote(remote_error) => remote_error.into(),
            Io(io_error) => io_error.into(),
            UserAbort => BinstallError::UserAbort,
        }
    }
}

@@ -14,6 +14,7 @@ use crate::{
    helpers::{
        download::Download,
        remote::{Client, Method},
        signal::wait_on_cancellation_signal,
        tasks::AutoAbortJoinHandle,
    },
    manifests::cargo_toml_binstall::{PkgFmt, PkgMeta},

@@ -146,9 +147,9 @@ impl super::Fetcher for GhCrateMeta {
    async fn fetch_and_extract(&self, dst: &Path) -> Result<(), BinstallError> {
        let (url, pkg_fmt) = self.resolution.get().unwrap(); // find() is called first
        debug!("Downloading package from: '{url}' dst:{dst:?} fmt:{pkg_fmt:?}");
        Download::new(self.client.clone(), url.clone())
            .and_extract(*pkg_fmt, dst)
            .await
        Ok(Download::new(self.client.clone(), url.clone())
            .and_extract(*pkg_fmt, dst, Some(Box::pin(wait_on_cancellation_signal())))
            .await?)
    }

    fn pkg_fmt(&self) -> PkgFmt {

@@ -10,6 +10,7 @@ use crate::{
    helpers::{
        download::Download,
        remote::{Client, Method},
        signal::wait_on_cancellation_signal,
    },
    manifests::cargo_toml_binstall::{PkgFmt, PkgMeta},
};

@@ -44,17 +45,22 @@ impl super::Fetcher for QuickInstall {
        let url = self.package_url();
        self.report();
        debug!("Checking for package at: '{url}'");
        self.client
        Ok(self
            .client
            .remote_exists(Url::parse(&url)?, Method::HEAD)
            .await
            .await?)
    }

    async fn fetch_and_extract(&self, dst: &Path) -> Result<(), BinstallError> {
        let url = self.package_url();
        debug!("Downloading package from: '{url}'");
        Download::new(self.client.clone(), Url::parse(&url)?)
            .and_extract(self.pkg_fmt(), dst)
            .await
        Ok(Download::new(self.client.clone(), Url::parse(&url)?)
            .and_extract(
                self.pkg_fmt(),
                dst,
                Some(Box::pin(wait_on_cancellation_signal())),
            )
            .await?)
    }

    fn pkg_fmt(&self) -> PkgFmt {

@@ -1,5 +1,5 @@
pub mod download;
pub mod jobserver_client;
pub mod remote;
pub mod signal;
pub mod tasks;

pub use binstalk_downloader::{download, remote};

@@ -1,112 +0,0 @@
use std::{fmt::Debug, marker::PhantomData, path::Path};

use digest::{Digest, FixedOutput, HashMarker, Output, OutputSizeUser, Update};
use log::debug;

use crate::{
    errors::BinstallError,
    helpers::remote::{Client, Url},
    manifests::cargo_toml_binstall::{PkgFmt, PkgFmtDecomposed, TarBasedFmt},
};

pub use async_extracter::TarEntriesVisitor;
use async_extracter::*;

mod async_extracter;
mod extracter;
mod stream_readable;

#[derive(Debug)]
pub struct Download<D: Digest = NoDigest> {
    client: Client,
    url: Url,
    _digest: PhantomData<D>,
    _checksum: Vec<u8>,
}

impl Download {
    pub fn new(client: Client, url: Url) -> Self {
        Self {
            client,
            url,
            _digest: PhantomData::default(),
            _checksum: Vec::new(),
        }
    }

    /// Download a file from the provided URL and extract part of it to
    /// the provided path.
    ///
    /// * `filter` - If Some, then it will pass the path of the file to it
    ///   and only extract ones which filter returns `true`.
    ///
    /// This does not support verifying a checksum due to the partial extraction
    /// and will ignore one if specified.
    pub async fn and_visit_tar<V: TarEntriesVisitor + Debug + Send + 'static>(
        self,
        fmt: TarBasedFmt,
        visitor: V,
    ) -> Result<V::Target, BinstallError> {
        let stream = self.client.create_request(self.url).await?;

        debug!("Downloading and extracting then in-memory processing");

        let ret = extract_tar_based_stream_and_visit(stream, fmt, visitor).await?;

        debug!("Download, extraction and in-memory procession OK");

        Ok(ret)
    }

    /// Download a file from the provided URL and extract it to the provided path.
    pub async fn and_extract(
        self,
        fmt: PkgFmt,
        path: impl AsRef<Path>,
    ) -> Result<(), BinstallError> {
        let stream = self.client.create_request(self.url).await?;

        let path = path.as_ref();
        debug!("Downloading and extracting to: '{}'", path.display());

        match fmt.decompose() {
            PkgFmtDecomposed::Tar(fmt) => extract_tar_based_stream(stream, path, fmt).await?,
            PkgFmtDecomposed::Bin => extract_bin(stream, path).await?,
            PkgFmtDecomposed::Zip => extract_zip(stream, path).await?,
        }

        debug!("Download OK, extracted to: '{}'", path.display());

        Ok(())
    }
}

impl<D: Digest> Download<D> {
    pub fn new_with_checksum(client: Client, url: Url, checksum: Vec<u8>) -> Self {
        Self {
            client,
            url,
            _digest: PhantomData::default(),
            _checksum: checksum,
        }
    }

    // TODO: implement checking the sum, may involve bringing (parts of) and_extract() back in here
}

#[derive(Clone, Copy, Debug, Default)]
pub struct NoDigest;

impl FixedOutput for NoDigest {
    fn finalize_into(self, _out: &mut Output<Self>) {}
}

impl OutputSizeUser for NoDigest {
    type OutputSize = generic_array::typenum::U0;
}

impl Update for NoDigest {
    fn update(&mut self, _data: &[u8]) {}
}

impl HashMarker for NoDigest {}

@@ -1,114 +0,0 @@
use std::{
    fmt::Debug,
    fs,
    io::{Read, Seek},
    path::Path,
};

use bytes::Bytes;
use futures_util::stream::Stream;
use log::debug;
use scopeguard::{guard, ScopeGuard};
use tar::Entries;
use tempfile::tempfile;
use tokio::task::block_in_place;

use super::{extracter::*, stream_readable::StreamReadable};
use crate::{errors::BinstallError, manifests::cargo_toml_binstall::TarBasedFmt};

pub async fn extract_bin<S, E>(stream: S, path: &Path) -> Result<(), BinstallError>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
    BinstallError: From<E>,
{
    let mut reader = StreamReadable::new(stream).await;
    block_in_place(move || {
        fs::create_dir_all(path.parent().unwrap())?;

        let mut file = fs::File::create(path)?;

        // remove it unless the operation isn't aborted and no write
        // fails.
        let remove_guard = guard(&path, |path| {
            fs::remove_file(path).ok();
        });

        reader.copy(&mut file)?;

        // Operation isn't aborted and all writes succeed,
        // disarm the remove_guard.
        ScopeGuard::into_inner(remove_guard);

        Ok(())
    })
}

pub async fn extract_zip<S, E>(stream: S, path: &Path) -> Result<(), BinstallError>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
    BinstallError: From<E>,
{
    let mut reader = StreamReadable::new(stream).await;
    block_in_place(move || {
        fs::create_dir_all(path.parent().unwrap())?;

        let mut file = tempfile()?;

        reader.copy(&mut file)?;

        // rewind it so that we can pass it to unzip
        file.rewind()?;

        unzip(file, path)
    })
}

pub async fn extract_tar_based_stream<S, E>(
    stream: S,
    path: &Path,
    fmt: TarBasedFmt,
) -> Result<(), BinstallError>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
    BinstallError: From<E>,
{
    let reader = StreamReadable::new(stream).await;
    block_in_place(move || {
        fs::create_dir_all(path.parent().unwrap())?;

        debug!("Extracting from {fmt} archive to {path:#?}");

        create_tar_decoder(reader, fmt)?.unpack(path)?;

        Ok(())
    })
}

/// Visitor must iterate over all entries.
/// Entries can be in arbitrary order.
pub trait TarEntriesVisitor {
    type Target;

    fn visit<R: Read>(&mut self, entries: Entries<'_, R>) -> Result<(), BinstallError>;
    fn finish(self) -> Result<Self::Target, BinstallError>;
}

pub async fn extract_tar_based_stream_and_visit<S, V, E>(
    stream: S,
    fmt: TarBasedFmt,
    mut visitor: V,
) -> Result<V::Target, BinstallError>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
    V: TarEntriesVisitor + Debug + Send + 'static,
    BinstallError: From<E>,
{
    let reader = StreamReadable::new(stream).await;
    block_in_place(move || {
        debug!("Extracting from {fmt} archive to process it in memory");

        let mut tar = create_tar_decoder(reader, fmt)?;
        visitor.visit(tar.entries()?)?;
        visitor.finish()
    })
}

@@ -1,46 +0,0 @@
use std::{
    fs::File,
    io::{self, BufRead, Read},
    path::Path,
};

use bzip2::bufread::BzDecoder;
use flate2::bufread::GzDecoder;
use log::debug;
use tar::Archive;
use xz2::bufread::XzDecoder;
use zip::read::ZipArchive;
use zstd::stream::Decoder as ZstdDecoder;

use crate::{errors::BinstallError, manifests::cargo_toml_binstall::TarBasedFmt};

pub fn create_tar_decoder(
    dat: impl BufRead + 'static,
    fmt: TarBasedFmt,
) -> io::Result<Archive<Box<dyn Read>>> {
    use TarBasedFmt::*;

    let r: Box<dyn Read> = match fmt {
        Tar => Box::new(dat),
        Tbz2 => Box::new(BzDecoder::new(dat)),
        Tgz => Box::new(GzDecoder::new(dat)),
        Txz => Box::new(XzDecoder::new(dat)),
        Tzstd => {
            // The error can only come from raw::Decoder::with_dictionary as of zstd 0.10.2 and
            // 0.11.2, which is specified as `&[]` by `ZstdDecoder::new`, thus `ZstdDecoder::new`
            // should not return any error.
            Box::new(ZstdDecoder::with_buffer(dat)?)
        }
    };

    Ok(Archive::new(r))
}

pub fn unzip(dat: File, dst: &Path) -> Result<(), BinstallError> {
    debug!("Decompressing from zip archive to `{dst:?}`");

    let mut zip = ZipArchive::new(dat)?;
    zip.extract(dst)?;

    Ok(())
}

@@ -1,144 +0,0 @@
use std::{
    cmp::min,
    future::Future,
    io::{self, BufRead, Read, Write},
    pin::Pin,
};

use bytes::{Buf, Bytes};
use futures_util::stream::{Stream, StreamExt};
use tokio::runtime::Handle;

use crate::{errors::BinstallError, helpers::signal::wait_on_cancellation_signal};

/// This wraps an AsyncIterator as a `Read`able.
/// It must be used in non-async context only,
/// meaning you have to use it with
/// `tokio::task::{block_in_place, spawn_blocking}` or
/// `std::thread::spawn`.
pub struct StreamReadable<S> {
    stream: S,
    handle: Handle,
    bytes: Bytes,
    cancellation_future: Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>>,
}

impl<S> StreamReadable<S> {
    pub(super) async fn new(stream: S) -> Self {
        Self {
            stream,
            handle: Handle::current(),
            bytes: Bytes::new(),
            cancellation_future: Box::pin(wait_on_cancellation_signal()),
        }
    }
}

impl<S, E> StreamReadable<S>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    BinstallError: From<E>,
{
    /// Copies from `self` to `writer`.
    ///
    /// Same as `io::copy` but does not allocate any internal buffer
    /// since `self` is buffered.
    pub(super) fn copy<W>(&mut self, mut writer: W) -> io::Result<()>
    where
        W: Write,
    {
        self.copy_inner(&mut writer)
    }

    fn copy_inner(&mut self, writer: &mut dyn Write) -> io::Result<()> {
        loop {
            let buf = self.fill_buf()?;
            if buf.is_empty() {
                // Eof
                break Ok(());
            }

            writer.write_all(buf)?;

            let n = buf.len();
            self.consume(n);
        }
    }
}

impl<S, E> Read for StreamReadable<S>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    BinstallError: From<E>,
{
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if buf.is_empty() {
            return Ok(0);
        }

        if self.fill_buf()?.is_empty() {
            return Ok(0);
        }

        let bytes = &mut self.bytes;

        // copy_to_slice requires the bytes to have enough remaining bytes
        // to fill buf.
        let n = min(buf.len(), bytes.remaining());

        bytes.copy_to_slice(&mut buf[..n]);

        Ok(n)
    }
}

/// If `Ok(Some(bytes))` is returned, then `bytes.is_empty() == false`.
async fn next_stream<S, E>(stream: &mut S) -> io::Result<Option<Bytes>>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    BinstallError: From<E>,
{
    loop {
        let option = stream
            .next()
            .await
            .transpose()
            .map_err(BinstallError::from)?;

        match option {
            Some(bytes) if bytes.is_empty() => continue,
            option => break Ok(option),
        }
    }
}

impl<S, E> BufRead for StreamReadable<S>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
    BinstallError: From<E>,
{
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let bytes = &mut self.bytes;

        if !bytes.has_remaining() {
            let option = self.handle.block_on(async {
                tokio::select! {
                    res = next_stream(&mut self.stream) => res,
                    res = self.cancellation_future.as_mut() => {
                        Err(res.err().unwrap_or_else(|| io::Error::from(BinstallError::UserAbort)))
                    },
                }
            })?;

            if let Some(new_bytes) = option {
                // new_bytes are guaranteed to be non-empty.
                *bytes = new_bytes;
            }
        }
        Ok(&*bytes)
    }

    fn consume(&mut self, amt: usize) {
        self.bytes.advance(amt);
    }
}

@@ -1,177 +0,0 @@
use std::{
    env,
    num::NonZeroU64,
    sync::Arc,
    time::{Duration, SystemTime},
};

use bytes::Bytes;
use futures_util::stream::Stream;
use httpdate::parse_http_date;
use log::{debug, info};
use reqwest::{
    header::{HeaderMap, RETRY_AFTER},
    Request, Response, StatusCode,
};
use tokio::{sync::Mutex, time::sleep};
use tower::{limit::rate::RateLimit, Service, ServiceBuilder, ServiceExt};

use crate::errors::BinstallError;

pub use reqwest::{tls, Method};
pub use url::Url;

const MAX_RETRY_DURATION: Duration = Duration::from_secs(120);
const MAX_RETRY_COUNT: u8 = 3;

#[derive(Clone, Debug)]
pub struct Client {
    client: reqwest::Client,
    rate_limit: Arc<Mutex<RateLimit<reqwest::Client>>>,
}

impl Client {
    /// * `per` - must not be 0.
    pub fn new(
        min_tls: Option<tls::Version>,
        per: Duration,
        num_request: NonZeroU64,
    ) -> Result<Self, BinstallError> {
        const USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));

        let mut builder = reqwest::ClientBuilder::new()
            .user_agent(USER_AGENT)
            .https_only(true)
            .min_tls_version(tls::Version::TLS_1_2)
            .tcp_nodelay(false);

        if let Some(ver) = min_tls {
            builder = builder.min_tls_version(ver);
        }

        let client = builder.build()?;

        Ok(Self {
            client: client.clone(),
            rate_limit: Arc::new(Mutex::new(
                ServiceBuilder::new()
                    .rate_limit(num_request.get(), per)
                    .service(client),
            )),
        })
    }

    pub fn get_inner(&self) -> &reqwest::Client {
        &self.client
    }

    async fn send_request_inner(
        &self,
        method: &Method,
        url: &Url,
    ) -> Result<Response, reqwest::Error> {
        let mut count = 0;

        loop {
            let request = Request::new(method.clone(), url.clone());

            // Reduce critical section:
            // - Construct the request before locking
            // - Once the rate_limit is ready, call it and obtain
            //   the future, then release the lock before
            //   polling the future, which performs network I/O that could
            //   take really long.
            let future = self.rate_limit.lock().await.ready().await?.call(request);

            let response = future.await?;

            let status = response.status();

            match (status, parse_header_retry_after(response.headers())) {
                (
                    // 503 429
                    StatusCode::SERVICE_UNAVAILABLE | StatusCode::TOO_MANY_REQUESTS,
                    Some(duration),
                ) if duration <= MAX_RETRY_DURATION && count < MAX_RETRY_COUNT => {
                    info!("Receiver status code {status}, will wait for {duration:#?} and retry");
                    sleep(duration).await
                }
                _ => break Ok(response),
            }

            count += 1;
        }
    }

    async fn send_request(
        &self,
        method: Method,
        url: Url,
        error_for_status: bool,
    ) -> Result<Response, BinstallError> {
        self.send_request_inner(&method, &url)
            .await
            .and_then(|response| {
                if error_for_status {
                    response.error_for_status()
                } else {
                    Ok(response)
                }
            })
            .map_err(|err| BinstallError::Http { method, url, err })
    }

    pub async fn remote_exists(&self, url: Url, method: Method) -> Result<bool, BinstallError> {
        Ok(self
            .send_request(method, url, false)
            .await?
            .status()
            .is_success())
    }

    pub async fn get_redirected_final_url(&self, url: Url) -> Result<Url, BinstallError> {
        Ok(self
            .send_request(Method::HEAD, url, true)
            .await?
            .url()
            .clone())
    }

    pub(crate) async fn create_request(
        &self,
        url: Url,
    ) -> Result<impl Stream<Item = reqwest::Result<Bytes>>, BinstallError> {
        debug!("Downloading from: '{url}'");

        self.send_request(Method::GET, url, true)
            .await
            .map(Response::bytes_stream)
    }
}

fn parse_header_retry_after(headers: &HeaderMap) -> Option<Duration> {
    let header = headers
        .get_all(RETRY_AFTER)
        .into_iter()
        .last()?
        .to_str()
        .ok()?;

    match header.parse::<u64>() {
        Ok(dur) => Some(Duration::from_secs(dur)),
        Err(_) => {
            let system_time = parse_http_date(header).ok()?;

            let retry_after_unix_timestamp =
                system_time.duration_since(SystemTime::UNIX_EPOCH).ok()?;

            let curr_time_unix_timestamp = SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .expect("SystemTime before UNIX EPOCH!");

            // retry_after_unix_timestamp - curr_time_unix_timestamp
            // If underflows, returns Duration::ZERO.
            Some(retry_after_unix_timestamp.saturating_sub(curr_time_unix_timestamp))
        }
    }
}

@@ -1,11 +1,10 @@
use std::io;

use futures_util::future::pending;
use tokio::{signal, sync::OnceCell};
use std::{future::pending, io};

use super::tasks::AutoAbortJoinHandle;
use crate::errors::BinstallError;

use tokio::{signal, sync::OnceCell};

/// This function will poll the handle while listening for ctrl_c,
/// `SIGINT`, `SIGHUP`, `SIGTERM` and `SIGQUIT`.
///

@@ -18,19 +17,24 @@ use crate::errors::BinstallError;
pub async fn cancel_on_user_sig_term<T>(
    handle: AutoAbortJoinHandle<T>,
) -> Result<T, BinstallError> {
    #[cfg(unix)]
    unix::ignore_signals_on_unix()?;
    ignore_signals()?;

    tokio::select! {
        res = handle => res,
        res = wait_on_cancellation_signal() => {
            res
                .map_err(BinstallError::Io)
            res.map_err(BinstallError::Io)
                .and(Err(BinstallError::UserAbort))
        }
    }
}

pub fn ignore_signals() -> io::Result<()> {
    #[cfg(unix)]
    unix::ignore_signals_on_unix()?;

    Ok(())
}

/// If call to it returns `Ok(())`, then all calls to this function after
/// that also returns `Ok(())`.
pub async fn wait_on_cancellation_signal() -> Result<(), io::Error> {

@@ -86,7 +90,7 @@ mod unix {
        }
    }

    pub fn ignore_signals_on_unix() -> Result<(), BinstallError> {
    pub fn ignore_signals_on_unix() -> Result<(), io::Error> {
        drop(signal(SignalKind::user_defined1())?);
        drop(signal(SignalKind::user_defined2())?);