Refactor binstalk-downloader APIs: Remove cancellation_future plus optimizations (#591)

- Refactor: Mv fn `asyncify` into mod `utils`
 - Improve err msg for task failure in `utils::asyncify`
 - Make sure `asyncify` always returns the same anonymous type
   that implements `Future` if `T` is the same.
 - Rewrite `extract_bin` to avoid `block_in_place` and to
   support cancellation by dropping
 - Rm unused dep scopeguard from binstalk-downloader
 - Rewrite `extract_tar_based_stream` so that it is cancellable by dropping
 - Unbox `extract_future` in `async_extracter::extract_zip`
 - Refactor `Download` API: Remove `CancellationFuture` as param

   Since none of the futures returned by `Download::and_*` calls
   `block_in_place`, they can now be cancelled by dropping instead of
   via this cumbersome hack.
 - Fix exports from mod `async_tar_visitor`
 - Make `signal::{ignore_signals, wait_on_cancellation_signal}` private
 - Rm the global variable `CANCELLED` in `wait_on_cancellation_signal`
   and rm fn `wait_on_cancellation_signal_inner`
 - Optimize `wait_on_cancellation_signal`: Avoid `tokio::select!` on `not(unix)`
 - Rm unnecessary `tokio::select!` in `wait_on_cancellation_signal` on unix,
   since `unix::wait_on_cancellation_signal_unix` already waits for the ctrl-c signal.
 - Optimize `extract_bin`: Send `Bytes` to blocking thread for zero-copy
 - Optimize `extract_with_blocking_decoder`: Avoid dup monomorphization
 - Box fut of `fetch_crate_cratesio` in `PackageInfo::resolve`
 - Optimize `extract_zip_entry`: Spawn only one blocking task per fn call

   by using an mpsc queue for the data to be written to the `outfile`.

   This improves efficiency, as using `tokio::fs::File` is expensive:
   every write spawns a new blocking task, which needs one heap allocation,
   is pushed onto an mpmc queue, and then has to be awaited.

   This also fixes a race condition where the unix permissions were set
   before the whole file was written, which could be exploited by attackers.
 - Optimize `extract_zip`: Use one `BytesMut` for the entire extraction
   process to avoid frequent allocation and deallocation.
 - Optimize `extract_zip_entry`: Inc prob of reusing alloc in `BytesMut`

   Perform the reserve before sending the buf over the mpsc queue to
   increase the possibility of reusing the previous allocation.

   NOTE: `BytesMut` only reuses the previous allocation if it is the
   only one holding a reference to it, which happens either on the first
   allocation or once all the `Bytes` in the mpsc queue have been
   consumed, written to the file and dropped.

   Since reading from the entry has to wait for external file I/O,
   this gives the blocking thread some time to flush the `Bytes` out.
 - Disable unused feature fs of dep tokio

Signed-off-by: Jiahao XU <Jiahao_XU@outlook.com>
Jiahao XU authored on 2022-12-12 14:15:30 +11:00, committed by GitHub
parent 058208bae9
commit db45f2fb7f
13 changed files with 234 additions and 263 deletions

Cargo.lock (generated)

@@ -199,7 +199,6 @@ dependencies = [
 "generic-array",
 "httpdate",
 "reqwest",
-"scopeguard",
 "tempfile",
 "thiserror",
 "tokio",

crates/binstalk-downloader/Cargo.toml

@@ -22,7 +22,6 @@ futures-util = { version = "0.3.25", default-features = false, features = ["std"
 generic-array = "0.14.6"
 httpdate = "1.0.2"
 reqwest = { version = "0.11.13", features = ["stream", "gzip", "brotli", "deflate"], default-features = false }
-scopeguard = "1.1.0"
 # Use a fork here since we need PAX support, but the upstream
 # does not hav the PR merged yet.
 #

crates/binstalk-downloader/src/download.rs

@@ -1,4 +1,4 @@
-use std::{fmt::Debug, future::Future, io, marker::PhantomData, path::Path, pin::Pin};
+use std::{fmt::Debug, io, marker::PhantomData, path::Path};

 use binstalk_types::cargo_toml_binstall::{PkgFmtDecomposed, TarBasedFmt};
 use digest::{Digest, FixedOutput, HashMarker, Output, OutputSizeUser, Update};
@@ -14,7 +14,8 @@ mod async_extracter;
 use async_extracter::*;

 mod async_tar_visitor;
-pub use async_tar_visitor::*;
+use async_tar_visitor::extract_tar_based_stream_and_visit;
+pub use async_tar_visitor::{TarEntriesVisitor, TarEntry, TarEntryType};

 mod extracter;
 mod stream_readable;
@@ -23,9 +24,6 @@ mod zip_extraction;
 pub use zip_extraction::ZipError;

 mod utils;
-use utils::await_on_option;
-
-pub type CancellationFuture = Option<Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>>>;

 #[derive(Debug, ThisError)]
 pub enum DownloadError {
@@ -102,12 +100,11 @@ impl Download {
     ///
     /// NOTE that this API does not support gnu extension sparse file unlike
     /// [`Download::and_extract`].
-    #[instrument(skip(visitor, cancellation_future))]
+    #[instrument(skip(visitor))]
     pub async fn and_visit_tar<V: TarEntriesVisitor + Debug + Send + 'static>(
         self,
         fmt: TarBasedFmt,
         visitor: V,
-        cancellation_future: CancellationFuture,
     ) -> Result<V::Target, DownloadError> {
         let stream = self
             .client
@@ -117,14 +114,7 @@ impl Download {
         debug!("Downloading and extracting then in-memory processing");

-        let ret = tokio::select! {
-            biased;
-
-            res = await_on_option(cancellation_future) => {
-                Err(res.err().unwrap_or_else(|| io::Error::from(DownloadError::UserAbort)))?
-            }
-            res = extract_tar_based_stream_and_visit(stream, fmt, visitor) => res?,
-        };
+        let ret = extract_tar_based_stream_and_visit(stream, fmt, visitor).await?;

         debug!("Download, extraction and in-memory procession OK");
@@ -135,19 +125,13 @@ impl Download {
     ///
-    /// `cancellation_future` can be used to cancel the extraction and return
-    /// [`DownloadError::UserAbort`] error.
-    #[instrument(skip(path, cancellation_future))]
+    #[instrument(skip(path))]
     pub async fn and_extract(
         self,
         fmt: PkgFmt,
         path: impl AsRef<Path>,
-        cancellation_future: CancellationFuture,
     ) -> Result<(), DownloadError> {
-        async fn inner(
-            this: Download,
-            fmt: PkgFmt,
-            path: &Path,
-            cancellation_future: CancellationFuture,
-        ) -> Result<(), DownloadError> {
+        async fn inner(this: Download, fmt: PkgFmt, path: &Path) -> Result<(), DownloadError> {
             let stream = this
                 .client
                 .get_stream(this.url)
@@ -157,11 +141,9 @@ impl Download {
             debug!("Downloading and extracting to: '{}'", path.display());

             match fmt.decompose() {
-                PkgFmtDecomposed::Tar(fmt) => {
-                    extract_tar_based_stream(stream, path, fmt, cancellation_future).await?
-                }
-                PkgFmtDecomposed::Bin => extract_bin(stream, path, cancellation_future).await?,
-                PkgFmtDecomposed::Zip => extract_zip(stream, path, cancellation_future).await?,
+                PkgFmtDecomposed::Tar(fmt) => extract_tar_based_stream(stream, path, fmt).await?,
+                PkgFmtDecomposed::Bin => extract_bin(stream, path).await?,
+                PkgFmtDecomposed::Zip => extract_zip(stream, path).await?,
             }

             debug!("Download OK, extracted to: '{}'", path.display());
@@ -169,7 +151,7 @@ impl Download {
             Ok(())
         }

-        inner(self, fmt, path.as_ref(), cancellation_future).await
+        inner(self, fmt, path.as_ref()).await
     }
 }
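
With `cancellation_future` gone, cancellation becomes the caller's job. A
minimal sketch of the drop-based pattern (hypothetical caller; assumes
`Download`, `DownloadError` with its `UserAbort` variant, `Client`, `Url`
and `PkgFmt` are in scope, and tokio's `signal` feature is enabled):

use std::path::Path;

// Race the extraction against ctrl-c. If the signal wins, `tokio::select!`
// drops the `and_extract` future, which now cancels cleanly because none
// of its code runs inside `block_in_place`.
async fn extract_with_ctrl_c(
    client: Client,
    url: Url,
    fmt: PkgFmt,
    dst: &Path,
) -> Result<(), DownloadError> {
    tokio::select! {
        res = Download::new(client, url).and_extract(fmt, dst) => res,
        _ = tokio::signal::ctrl_c() => Err(DownloadError::UserAbort),
    }
}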

crates/binstalk-downloader/src/download/async_extracter.rs

@@ -1,96 +1,137 @@
-use std::{fs, path::Path};
+use std::{
+    fs,
+    future::Future,
+    io::{self, Write},
+    path::Path,
+};

 use async_zip::read::stream::ZipFileReader;
-use bytes::Bytes;
-use futures_util::stream::Stream;
-use scopeguard::{guard, ScopeGuard};
-use tokio::task::block_in_place;
+use bytes::{Bytes, BytesMut};
+use futures_util::{
+    future::try_join,
+    stream::{Stream, StreamExt},
+};
+use tokio::sync::mpsc;
 use tokio_util::io::StreamReader;
 use tracing::debug;

 use super::{
-    await_on_option, extracter::*, stream_readable::StreamReadable,
-    zip_extraction::extract_zip_entry, CancellationFuture, DownloadError, TarBasedFmt, ZipError,
+    extracter::*, stream_readable::StreamReadable, utils::asyncify,
+    zip_extraction::extract_zip_entry, DownloadError, TarBasedFmt, ZipError,
 };

-pub async fn extract_bin<S>(
-    stream: S,
-    path: &Path,
-    cancellation_future: CancellationFuture,
-) -> Result<(), DownloadError>
+pub async fn extract_bin<S>(stream: S, path: &Path) -> Result<(), DownloadError>
 where
-    S: Stream<Item = Result<Bytes, DownloadError>> + Unpin + 'static,
+    S: Stream<Item = Result<Bytes, DownloadError>> + Send + Sync + Unpin + 'static,
 {
-    let mut reader = StreamReadable::new(stream, cancellation_future).await;
-    block_in_place(move || {
-        fs::create_dir_all(path.parent().unwrap())?;
+    debug!("Writing to `{}`", path.display());

+    extract_with_blocking_decoder(stream, path, |mut rx, path| {
         let mut file = fs::File::create(path)?;

-        // remove it unless the operation isn't aborted and no write
-        // fails.
-        let remove_guard = guard(&path, |path| {
-            fs::remove_file(path).ok();
-        });
+        while let Some(bytes) = rx.blocking_recv() {
+            file.write_all(&bytes)?;
+        }

-        reader.copy(&mut file)?;
-
-        // Operation isn't aborted and all writes succeed,
-        // disarm the remove_guard.
-        ScopeGuard::into_inner(remove_guard);
-
-        Ok(())
+        file.flush()
     })
+    .await
 }

-pub async fn extract_zip<S>(
-    stream: S,
-    path: &Path,
-    cancellation_future: CancellationFuture,
-) -> Result<(), DownloadError>
+pub async fn extract_zip<S>(stream: S, path: &Path) -> Result<(), DownloadError>
 where
     S: Stream<Item = Result<Bytes, DownloadError>> + Unpin + Send + Sync + 'static,
 {
     debug!("Decompressing from zip archive to `{}`", path.display());

-    let extract_future = Box::pin(async move {
-        let reader = StreamReader::new(stream);
-        let mut zip = ZipFileReader::new(reader);
+    let reader = StreamReader::new(stream);
+    let mut zip = ZipFileReader::new(reader);
+    let mut buf = BytesMut::with_capacity(4 * 4096);

-        while let Some(entry) = zip.entry_reader().await.map_err(ZipError::from_inner)? {
-            extract_zip_entry(entry, path).await?;
-        }
-
-        Ok(())
-    });
-
-    tokio::select! {
-        biased;
-
-        res = await_on_option(cancellation_future) => {
-            Err(res.err().map(DownloadError::from).unwrap_or(DownloadError::UserAbort))
-        }
-        res = extract_future => res,
+    while let Some(entry) = zip.entry_reader().await.map_err(ZipError::from_inner)? {
+        extract_zip_entry(entry, path, &mut buf).await?;
     }
+
+    Ok(())
 }

 pub async fn extract_tar_based_stream<S>(
     stream: S,
     path: &Path,
     fmt: TarBasedFmt,
-    cancellation_future: CancellationFuture,
 ) -> Result<(), DownloadError>
 where
-    S: Stream<Item = Result<Bytes, DownloadError>> + Unpin + 'static,
+    S: Stream<Item = Result<Bytes, DownloadError>> + Send + Sync + Unpin + 'static,
 {
-    let reader = StreamReadable::new(stream, cancellation_future).await;
-    block_in_place(move || {
-        fs::create_dir_all(path.parent().unwrap())?;
+    debug!("Extracting from {fmt} archive to {path:#?}");

-        debug!("Extracting from {fmt} archive to {path:#?}");
+    extract_with_blocking_decoder(stream, path, move |rx, path| {
+        create_tar_decoder(StreamReadable::new(rx), fmt)?.unpack(path)
+    })
+    .await
+}

-        create_tar_decoder(reader, fmt)?.unpack(path)?;
+async fn extract_with_blocking_decoder<S, F>(
+    stream: S,
+    path: &Path,
+    f: F,
+) -> Result<(), DownloadError>
+where
+    S: Stream<Item = Result<Bytes, DownloadError>> + Send + Sync + Unpin + 'static,
+    F: FnOnce(mpsc::Receiver<Bytes>, &Path) -> io::Result<()> + Send + Sync + 'static,
+{
+    async fn inner<S, Fut>(
+        mut stream: S,
+        task: Fut,
+        tx: mpsc::Sender<Bytes>,
+    ) -> Result<(), DownloadError>
+    where
+        // We do not use trait object for S since there will only be one
+        // S used with this function.
+        S: Stream<Item = Result<Bytes, DownloadError>> + Send + Sync + Unpin + 'static,
+        // asyncify would always return the same future, so no need to
+        // use trait object here.
+        Fut: Future<Output = io::Result<()>> + Send + Sync,
+    {
+        try_join(
+            async move {
+                while let Some(bytes) = stream.next().await.transpose()? {
+                    if tx.send(bytes).await.is_err() {
+                        // The extraction task has returned, which could mean:
+                        //  - extraction failed with an error, or
+                        //  - extraction succeeded without needing the rest of the data.
+                        //
+                        // It's hard to tell the difference here, so we assume
+                        // the first scenario occurred.
+                        //
+                        // Even if the second scenario occurred, it won't affect the
+                        // extraction process anyway, so we can just ignore it.
+                        return Ok(());
+                    }
+                }

-        Ok(())
-    })
+                Ok(())
+            },
+            task,
+        )
+        .await?;
+
+        Ok(())
+    }
+
+    // Use channel size = 5 to minimize the waiting time in the extraction task
+    let (tx, rx) = mpsc::channel(5);
+
+    let path = path.to_owned();
+    let task = asyncify(move || {
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)?;
+        }
+
+        f(rx, &path)
+    });
+
+    inner(stream, task, tx).await
}
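
For orientation, a sketch of driving the new `extract_bin` directly (assumed
to sit inside the crate where `extract_bin` and `DownloadError` are visible;
the one-chunk in-memory stream stands in for a real HTTP body):

use std::path::Path;

use bytes::Bytes;
use futures_util::stream;

// Any `Stream<Item = Result<Bytes, DownloadError>> + Send + Sync + Unpin
// + 'static` satisfies the new bound.
async fn write_payload(dst: &Path) -> Result<(), DownloadError> {
    let body = stream::iter([Ok(Bytes::from_static(b"hello"))]);
    extract_bin(body, dst).await
}

Dropping the future returned by `extract_bin` drops `tx`; the blocking task
then sees `None` from `blocking_recv`, flushes the file, and winds down,
which is what makes cancellation by dropping safe here.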

crates/binstalk-downloader/src/download/async_tar_visitor.rs

@@ -91,7 +91,7 @@ pub trait TarEntriesVisitor: Send + Sync {
     fn finish(self) -> Result<Self::Target, DownloadError>;
 }

-pub async fn extract_tar_based_stream_and_visit<S, V>(
+pub(crate) async fn extract_tar_based_stream_and_visit<S, V>(
     stream: S,
     fmt: TarBasedFmt,
     mut visitor: V,

crates/binstalk-downloader/src/download/stream_readable.rs

@@ -1,74 +1,28 @@
-use std::{
-    cmp::min,
-    io::{self, BufRead, Read, Write},
-};
+use std::io::{self, BufRead, Read};

 use bytes::{Buf, Bytes};
-use futures_util::stream::{Stream, StreamExt};
-use tokio::runtime::Handle;
-
-use super::{await_on_option, CancellationFuture, DownloadError};
+use tokio::sync::mpsc;

 /// This wraps an AsyncIterator as a `Read`able.
 /// It must be used in non-async context only,
 /// meaning you have to use it with
 /// `tokio::task::{block_in_place, spawn_blocking}` or
 /// `std::thread::spawn`.
-pub struct StreamReadable<S> {
-    stream: S,
-    handle: Handle,
+pub struct StreamReadable {
+    rx: mpsc::Receiver<Bytes>,
     bytes: Bytes,
-    cancellation_future: CancellationFuture,
 }

-impl<S> StreamReadable<S> {
-    pub(super) async fn new(stream: S, cancellation_future: CancellationFuture) -> Self {
+impl StreamReadable {
+    pub(super) fn new(rx: mpsc::Receiver<Bytes>) -> Self {
         Self {
-            stream,
-            handle: Handle::current(),
+            rx,
             bytes: Bytes::new(),
-            cancellation_future,
         }
     }
 }

-impl<S, E> StreamReadable<S>
-where
-    S: Stream<Item = Result<Bytes, E>> + Unpin,
-    DownloadError: From<E>,
-{
-    /// Copies from `self` to `writer`.
-    ///
-    /// Same as `io::copy` but does not allocate any internal buffer
-    /// since `self` is buffered.
-    pub(super) fn copy<W>(&mut self, mut writer: W) -> io::Result<()>
-    where
-        W: Write,
-    {
-        self.copy_inner(&mut writer)
-    }
-
-    fn copy_inner(&mut self, writer: &mut dyn Write) -> io::Result<()> {
-        loop {
-            let buf = self.fill_buf()?;
-
-            if buf.is_empty() {
-                // Eof
-                break Ok(());
-            }
-
-            writer.write_all(buf)?;
-
-            let n = buf.len();
-            self.consume(n);
-        }
-    }
-}
-
-impl<S, E> Read for StreamReadable<S>
-where
-    S: Stream<Item = Result<Bytes, E>> + Unpin,
-    DownloadError: From<E>,
-{
+impl Read for StreamReadable {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         if buf.is_empty() {
             return Ok(0);
@@ -82,60 +36,26 @@ where
         // copy_to_slice requires the bytes to have enough remaining bytes
         // to fill buf.
-        let n = min(buf.len(), bytes.remaining());
+        let n = buf.len().min(bytes.remaining());

+        // <Bytes as Buf>::copy_to_slice copies and consumes the bytes
         bytes.copy_to_slice(&mut buf[..n]);

         Ok(n)
     }
 }

-/// If `Ok(Some(bytes))` if returned, then `bytes.is_empty() == false`.
-async fn next_stream<S, E>(stream: &mut S) -> io::Result<Option<Bytes>>
-where
-    S: Stream<Item = Result<Bytes, E>> + Unpin,
-    DownloadError: From<E>,
-{
-    loop {
-        let option = stream
-            .next()
-            .await
-            .transpose()
-            .map_err(DownloadError::from)?;
-
-        match option {
-            Some(bytes) if bytes.is_empty() => continue,
-            option => break Ok(option),
-        }
-    }
-}
-
-impl<S, E> BufRead for StreamReadable<S>
-where
-    S: Stream<Item = Result<Bytes, E>> + Unpin,
-    DownloadError: From<E>,
-{
+impl BufRead for StreamReadable {
     fn fill_buf(&mut self) -> io::Result<&[u8]> {
         let bytes = &mut self.bytes;

         if !bytes.has_remaining() {
-            let option = self.handle.block_on(async {
-                let cancellation_future = self.cancellation_future.as_mut();
-                tokio::select! {
-                    biased;
-
-                    res = await_on_option(cancellation_future) => {
-                        Err(res.err().unwrap_or_else(|| io::Error::from(DownloadError::UserAbort)))
-                    },
-                    res = next_stream(&mut self.stream) => res,
-                }
-            })?;
-
-            if let Some(new_bytes) = option {
+            if let Some(new_bytes) = self.rx.blocking_recv() {
                 // new_bytes are guaranteed to be non-empty.
                 *bytes = new_bytes;
             }
         }
         Ok(&*bytes)
     }
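
The new contract in miniature (a sketch; `StreamReadable::new` is
`pub(super)`, so this assumes code living in the same `download` module):

use std::io;

use bytes::Bytes;
use tokio::sync::mpsc;

// Must run on a blocking thread (e.g. via `asyncify`/`spawn_blocking`):
// `blocking_recv` inside `fill_buf` panics on an async worker thread.
fn count_bytes(rx: mpsc::Receiver<Bytes>) -> io::Result<u64> {
    let mut readable = StreamReadable::new(rx);
    io::copy(&mut readable, &mut io::sink())
}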

crates/binstalk-downloader/src/download/utils.rs

@@ -1,16 +1,22 @@
-use std::future::{pending, Future};
+use std::{future::Future, io};

-/// Await on `future` if it is not `None`, or call [`pending`]
-/// so that this branch would never get selected again.
-///
-/// Designed to use with [`tokio::select`].
-pub(super) async fn await_on_option<Fut, R>(future: Option<Fut>) -> R
+use tokio::task;
+
+/// Copied from tokio https://docs.rs/tokio/latest/src/tokio/fs/mod.rs.html#132
+pub(super) fn asyncify<F, T>(f: F) -> impl Future<Output = io::Result<T>> + Send + Sync + 'static
 where
-    Fut: Future<Output = R>,
+    F: FnOnce() -> io::Result<T> + Send + 'static,
+    T: Send + 'static,
 {
-    if let Some(future) = future {
-        future.await
-    } else {
-        pending().await
+    async fn inner<T: Send + 'static>(handle: task::JoinHandle<io::Result<T>>) -> io::Result<T> {
+        match handle.await {
+            Ok(res) => res,
+            Err(err) => Err(io::Error::new(
+                io::ErrorKind::Other,
+                format!("background task failed: {err}"),
+            )),
+        }
     }
+
+    inner(task::spawn_blocking(f))
 }
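
A sketch of the intended call pattern (hypothetical helper in the same
module):

use std::{fs, io, path::PathBuf};

// Runs the closure on tokio's blocking pool. Because `asyncify` is a plain
// fn returning `impl Future`, it yields the same concrete future type for a
// given `T`, so callers such as `extract_with_blocking_decoder` can name it
// with a generic parameter instead of boxing it.
async fn file_len(path: PathBuf) -> io::Result<u64> {
    asyncify(move || Ok(fs::metadata(&path)?.len())).await
}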

crates/binstalk-downloader/src/download/zip_extraction.rs

@@ -1,13 +1,18 @@
 use std::{
-    io,
+    io::Write,
     path::{Component, Path, PathBuf},
 };

 use async_zip::{read::ZipEntryReader, ZipEntryExt};
+use bytes::{Bytes, BytesMut};
+use futures_util::future::{try_join, TryFutureExt};
 use thiserror::Error as ThisError;
-use tokio::{fs, io::AsyncRead, task::spawn_blocking};
+use tokio::{
+    io::{AsyncRead, AsyncReadExt},
+    sync::mpsc,
+};

-use super::DownloadError;
+use super::{utils::asyncify, DownloadError};

 #[derive(Debug, ThisError)]
 enum ZipErrorInner {
@@ -31,6 +36,7 @@ impl ZipError {
 pub(super) async fn extract_zip_entry<R>(
     entry: ZipEntryReader<'_, R>,
     path: &Path,
+    buf: &mut BytesMut,
 ) -> Result<(), DownloadError>
 where
     R: AsyncRead + Unpin + Send + Sync,
@@ -68,31 +74,82 @@ where
         })
         .await?;
     } else {
+        // Use channel size = 5 to minimize the waiting time in the extraction task
+        let (tx, mut rx) = mpsc::channel::<Bytes>(5);
+
         // This entry is a file.
-        let mut outfile = asyncify(move || {
-            if let Some(p) = outpath.parent() {
-                std::fs::create_dir_all(p)?;
-            }
-            let outfile = std::fs::File::create(&outpath)?;
-
-            if let Some(perms) = perms {
-                outfile.set_permissions(perms)?;
-            }
-
-            Ok(outfile)
-        })
-        .await
-        .map(fs::File::from_std)?;
-
-        entry
-            .copy_to_end_crc(&mut outfile, 64 * 1024)
-            .await
-            .map_err(ZipError::from_inner)?;
+        try_join(
+            asyncify(move || {
+                if let Some(p) = outpath.parent() {
+                    std::fs::create_dir_all(p)?;
+                }
+
+                let mut outfile = std::fs::File::create(&outpath)?;
+
+                while let Some(bytes) = rx.blocking_recv() {
+                    outfile.write_all(&bytes)?;
+                }
+
+                outfile.flush()?;
+
+                if let Some(perms) = perms {
+                    outfile.set_permissions(perms)?;
+                }
+
+                Ok(())
+            })
+            .err_into(),
+            copy_file_to_mpsc(entry, tx, buf)
+                .map_err(ZipError::from_inner)
+                .map_err(DownloadError::from),
+        )
+        .await?;
     }

     Ok(())
 }

+async fn copy_file_to_mpsc<R>(
+    mut entry: ZipEntryReader<'_, R>,
+    tx: mpsc::Sender<Bytes>,
+    buf: &mut BytesMut,
+) -> Result<(), async_zip::error::ZipError>
+where
+    R: AsyncRead + Unpin + Send + Sync,
+{
+    // Since BytesMut does not have a max cap, if AsyncReadExt::read_buf returns
+    // 0 then it means Eof.
+    while entry.read_buf(buf).await? != 0 {
+        // Ensure AsyncReadExt::read_buf can read at least 4096B to avoid
+        // frequent expensive read syscalls.
+        //
+        // Perform this reserve before sending the buf over the mpsc queue to
+        // increase the possibility of reusing the previous allocation.
+        //
+        // NOTE: `BytesMut` only reuses the previous allocation if it is the
+        // only one holding a reference to it, which is either on the first
+        // iteration or once all the `Bytes` in the mpsc queue have been
+        // consumed, written to the file and dropped.
+        //
+        // Since reading from the entry has to wait for external file I/O,
+        // this gives the blocking thread some time to flush the `Bytes` out.
+        //
+        // If all `Bytes` are flushed out, then we can reuse the allocation here.
+        buf.reserve(4096);
+
+        if tx.send(buf.split().freeze()).await.is_err() {
+            // Same reason as in extract_with_blocking_decoder
+            break;
+        }
+    }
+
+    if entry.compare_crc() {
+        Ok(())
+    } else {
+        Err(async_zip::error::ZipError::CRC32CheckError)
+    }
+}
+
 /// Ensure the file path is safe to use as a [`Path`].
 ///
 /// - It can't contain NULL bytes
@@ -132,18 +189,3 @@ fn check_filename_and_normalize(filename: &str) -> Option<PathBuf> {
     Some(path)
 }
-
-/// Copied from tokio https://docs.rs/tokio/latest/src/tokio/fs/mod.rs.html#132
-async fn asyncify<F, T>(f: F) -> io::Result<T>
-where
-    F: FnOnce() -> io::Result<T> + Send + 'static,
-    T: Send + 'static,
-{
-    match spawn_blocking(f).await {
-        Ok(res) => res,
-        Err(_) => Err(io::Error::new(
-            io::ErrorKind::Other,
-            "background task failed",
-        )),
-    }
-}
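
A standalone demonstration of the allocation-reuse property the
reserve-before-send comment relies on (illustrative only; reuse is an
optimization `bytes` may apply, not a guarantee):

use bytes::{BufMut, BytesMut};

fn main() {
    let mut buf = BytesMut::with_capacity(4096);
    buf.put_slice(b"chunk");

    // Hand the filled portion off as a cheap, reference-counted `Bytes`,
    // leaving `buf` empty.
    let chunk = buf.split().freeze();

    // Once every outstanding `Bytes` is dropped (here immediately; in
    // `extract_zip_entry`, after the blocking task has written it out)...
    drop(chunk);

    // ...a later `reserve` can reclaim the original allocation instead of
    // allocating afresh.
    buf.reserve(4096);
    assert!(buf.capacity() >= 4096);
}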

crates/binstalk/src/drivers/cratesio.rs

@@ -10,7 +10,6 @@ use crate::{
     helpers::{
         download::Download,
         remote::{Client, Url},
-        signal::wait_on_cancellation_signal,
     },
     manifests::cargo_toml_binstall::{Meta, TarBasedFmt},
 };
@@ -54,10 +53,6 @@ pub async fn fetch_crate_cratesio(
     let manifest_dir_path: PathBuf = format!("{name}-{version_name}").into();

     Ok(Download::new(client, Url::parse(&crate_url)?)
-        .and_visit_tar(
-            TarBasedFmt::Tgz,
-            ManifestVisitor::new(manifest_dir_path),
-            Some(Box::pin(wait_on_cancellation_signal())),
-        )
+        .and_visit_tar(TarBasedFmt::Tgz, ManifestVisitor::new(manifest_dir_path))
         .await?)
 }

crates/binstalk/src/fetchers/gh_crate_meta.rs

@@ -15,7 +15,6 @@ use crate::{
     helpers::{
         download::Download,
         remote::{Client, Method},
-        signal::wait_on_cancellation_signal,
         tasks::AutoAbortJoinHandle,
     },
     manifests::cargo_toml_binstall::{PkgFmt, PkgMeta},
@@ -167,7 +166,7 @@ impl super::Fetcher for GhCrateMeta {
         let (url, pkg_fmt) = self.resolution.get().unwrap(); // find() is called first
         debug!("Downloading package from: '{url}' dst:{dst:?} fmt:{pkg_fmt:?}");
         Ok(Download::new(self.client.clone(), url.clone())
-            .and_extract(*pkg_fmt, dst, Some(Box::pin(wait_on_cancellation_signal())))
+            .and_extract(*pkg_fmt, dst)
             .await?)
     }

crates/binstalk/src/fetchers/quickinstall.rs

@@ -9,7 +9,6 @@ use crate::{
     helpers::{
         download::Download,
         remote::{Client, Method},
-        signal::wait_on_cancellation_signal,
         tasks::AutoAbortJoinHandle,
     },
     manifests::cargo_toml_binstall::{PkgFmt, PkgMeta},
@@ -72,11 +71,7 @@ impl super::Fetcher for QuickInstall {
         let url = self.package_url();
         debug!("Downloading package from: '{url}'");
         Ok(Download::new(self.client.clone(), Url::parse(&url)?)
-            .and_extract(
-                self.pkg_fmt(),
-                dst,
-                Some(Box::pin(wait_on_cancellation_signal())),
-            )
+            .and_extract(self.pkg_fmt(), dst)
             .await?)
     }

crates/binstalk/src/helpers/signal.rs

@@ -3,7 +3,7 @@ use std::{future::pending, io};
 use super::tasks::AutoAbortJoinHandle;
 use crate::errors::BinstallError;

-use tokio::{signal, sync::OnceCell};
+use tokio::signal;

 /// This function will poll the handle while listening for ctrl_c,
 /// `SIGINT`, `SIGHUP`, `SIGTERM` and `SIGQUIT`.
@@ -30,7 +30,7 @@ pub async fn cancel_on_user_sig_term<T>(
     }
 }

-pub fn ignore_signals() -> io::Result<()> {
+fn ignore_signals() -> io::Result<()> {
     #[cfg(unix)]
     unix::ignore_signals_on_unix()?;
@@ -39,16 +39,7 @@ pub fn ignore_signals() -> io::Result<()> {
 /// If call to it returns `Ok(())`, then all calls to this function after
 /// that also returns `Ok(())`.
-pub async fn wait_on_cancellation_signal() -> Result<(), io::Error> {
-    static CANCELLED: OnceCell<()> = OnceCell::const_new();
-
-    CANCELLED
-        .get_or_try_init(wait_on_cancellation_signal_inner)
-        .await
-        .copied()
-}
-
-async fn wait_on_cancellation_signal_inner() -> Result<(), io::Error> {
+async fn wait_on_cancellation_signal() -> Result<(), io::Error> {
     #[cfg(unix)]
     async fn inner() -> Result<(), io::Error> {
         unix::wait_on_cancellation_signal_unix().await
@@ -56,16 +47,10 @@ async fn wait_on_cancellation_signal_inner() -> Result<(), io::Error> {
     #[cfg(not(unix))]
     async fn inner() -> Result<(), io::Error> {
-        // Use pending here so that tokio::select! would just skip this branch.
-        pending().await
+        signal::ctrl_c().await
     }

-    tokio::select! {
-        biased;
-
-        res = signal::ctrl_c() => res,
-        res = inner() => res,
-    }
+    inner().await
 }

 #[cfg(unix)]

crates/binstalk/src/ops/resolve.rs

@@ -398,7 +398,15 @@ impl PackageInfo {
         // Fetch crate via crates.io, git, or use a local manifest path
         let manifest = match opts.manifest_path.as_ref() {
             Some(manifest_path) => load_manifest_path(manifest_path)?,
-            None => fetch_crate_cratesio(client, crates_io_api_client, &name, &version_req).await?,
+            None => {
+                Box::pin(fetch_crate_cratesio(
+                    client,
+                    crates_io_api_client,
+                    &name,
+                    &version_req,
+                ))
+                .await?
+            }
         };

         let Some(mut package) = manifest.package else {
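
Why boxing the `fetch_crate_cratesio` future helps, in a toy example
(illustrative only): an `async fn`'s future embeds every future it awaits
inline, so a large child future inflates the parent; `Box::pin` moves that
state to the heap, keeping `resolve`'s own future small.

use std::mem::size_of_val;

async fn big() -> u8 {
    let buf = [0u8; 16 * 1024]; // lives across the await, so stored in the future
    std::future::ready(()).await;
    buf[0]
}

async fn inline_parent() -> u8 {
    big().await // this parent future embeds all 16 KiB
}

async fn boxed_parent() -> u8 {
    Box::pin(big()).await // this parent holds only a heap pointer
}

fn main() {
    assert!(size_of_val(&inline_parent()) > 16 * 1024);
    assert!(size_of_val(&boxed_parent()) < 64);
}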