Mirror of https://github.com/cargo-bins/cargo-binstall.git

* Avoid potential panicking in `args::parse` by using `Vec::get` instead of indexing (a short sketch follows this list)
* Refactor: Simplify the `opts::{resolve, install}` API. Many parameters can be shared and put into `opts::Options` instead, which also avoids a few `Arc<Path>`.
* Optimize `get_install_path`: Avoid cloning `install_path`
* Optimize `LazyJobserverClient`: Un-`Arc` it & remove the `Clone` impl to avoid additional boxing
* Optimize `find_version`: Avoid cloning `semver::Version`
* Optimize `GhCrateMeta::launch_baseline_find_tasks`: Return `impl Iterator<Item = impl Future<Output = ...>>` instead of `impl Iterator<Item = AutoAbortJoinHandle<...>>` to avoid unnecessary spawning. Each spawned task has to be boxed and then polled by the tokio runtime, and might also be moved. While spawning increases parallelism, it does not justify the costs here because:
  - Each `Future` only calls `remote_exists`.
  - Each `remote_exists` call sends requests to the same domain, which is likely to share the same http2 connection. Since the connection is shared anyway, spawning does not speed anything up but merely adds communication overhead.
  - Plus there is the tokio runtime's spawning cost.
* Optimize `install_crates`: Destructure `Args` before any `.await` point to reduce the size of the future
* Refactor `logging`: Replace the param `arg` with `log_level` & `json_output` to avoid a dependency on `Args`
* Add deps strum & strum_macros to crates/bin
* Derive `strum_macros::EnumCount` for `Strategy`
* Optimize strategies parsing in `install_crates`
* Fix panic in `install_crates` when `Compile` is not the last strategy specified
* Optimize: Take `Vec<Self>` instead of a slice in `CrateName::dedup`
* Refactor: Extract new fn `compute_resolvers`
* Refactor: Extract new fn `compute_paths_and_load_manifests`
* Refactor: Extract new fn `filter_out_installed_crates`
* Reorder `install_crates`: Only run target detection if args are valid and there are crates to be installed
* Optimize `filter_out_installed_crates`: Avoid allocation by returning an `Iterator`
* Fix user_agent of `remote::Client`: Let the user specify it
* Refactor: Replace `UIThread` with `ui::confirm`, which is much simpler

Signed-off-by: Jiahao XU <Jiahao_XU@outlook.com>
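As a minimal illustration of the first point (a generic sketch only; `first_positional` and the surrounding CLI handling are hypothetical, not the actual `args::parse` code): indexing a `Vec` panics when the index is out of bounds, while `Vec::get` returns an `Option` the caller can handle explicitly.

// Hypothetical stand-in for positional-argument handling.
fn first_positional(args: &[String]) -> Option<&String> {
    // `args[1]` would panic if no argument was passed;
    // `args.get(1)` returns None instead and lets the caller decide.
    args.get(1)
}

fn main() {
    let args: Vec<String> = std::env::args().collect();
    match first_positional(&args) {
        Some(arg) => println!("first argument: {arg}"),
        None => eprintln!("no argument given"),
    }
}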
178 lines
5.3 KiB
Rust
use std::{cmp::min, iter::repeat};

use log::{LevelFilter, Log, STATIC_MAX_LEVEL};
use once_cell::sync::Lazy;
use supports_color::{on as supports_color_on_stream, Stream::Stdout};
use tracing::{
    callsite::Callsite,
    dispatcher, field,
    subscriber::{self, set_global_default},
    Event, Level, Metadata,
};
use tracing_core::{identify_callsite, metadata::Kind, subscriber::Subscriber};
use tracing_log::AsTrace;
use tracing_subscriber::{filter::targets::Targets, fmt::fmt, layer::SubscriberExt};

// Shamelessly taken from tracing-log

struct Fields {
    message: field::Field,
}

static FIELD_NAMES: &[&str] = &["message"];

impl Fields {
    fn new(cs: &'static dyn Callsite) -> Self {
        let fieldset = cs.metadata().fields();
        let message = fieldset.field("message").unwrap();
        Fields { message }
    }
}

macro_rules! log_cs {
    ($level:expr, $cs:ident, $meta:ident, $fields:ident, $ty:ident) => {
        struct $ty;
        static $cs: $ty = $ty;
        static $meta: Metadata<'static> = Metadata::new(
            "log event",
            "log",
            $level,
            None,
            None,
            None,
            field::FieldSet::new(FIELD_NAMES, identify_callsite!(&$cs)),
            Kind::EVENT,
        );
        static $fields: Lazy<Fields> = Lazy::new(|| Fields::new(&$cs));

        impl Callsite for $ty {
            fn set_interest(&self, _: subscriber::Interest) {}
            fn metadata(&self) -> &'static Metadata<'static> {
                &$meta
            }
        }
    };
}

log_cs!(
    Level::TRACE,
    TRACE_CS,
    TRACE_META,
    TRACE_FIELDS,
    TraceCallsite
);
log_cs!(
    Level::DEBUG,
    DEBUG_CS,
    DEBUG_META,
    DEBUG_FIELDS,
    DebugCallsite
);
log_cs!(Level::INFO, INFO_CS, INFO_META, INFO_FIELDS, InfoCallsite);
log_cs!(Level::WARN, WARN_CS, WARN_META, WARN_FIELDS, WarnCallsite);
log_cs!(
    Level::ERROR,
    ERROR_CS,
    ERROR_META,
    ERROR_FIELDS,
    ErrorCallsite
);

fn loglevel_to_cs(level: log::Level) -> (&'static Fields, &'static Metadata<'static>) {
    match level {
        log::Level::Trace => (&*TRACE_FIELDS, &TRACE_META),
        log::Level::Debug => (&*DEBUG_FIELDS, &DEBUG_META),
        log::Level::Info => (&*INFO_FIELDS, &INFO_META),
        log::Level::Warn => (&*WARN_FIELDS, &WARN_META),
        log::Level::Error => (&*ERROR_FIELDS, &ERROR_META),
    }
}

struct Logger;

impl Logger {
    fn init(log_level: LevelFilter) {
        log::set_max_level(log_level);
        log::set_logger(&Self).unwrap();
    }
}

impl Log for Logger {
    fn enabled(&self, metadata: &log::Metadata<'_>) -> bool {
        if metadata.level() > log::max_level() {
            // First, check the log record against the current max level enabled.
            false
        } else {
            // Check if the current `tracing` dispatcher cares about this.
            dispatcher::get_default(|dispatch| dispatch.enabled(&metadata.as_trace()))
        }
    }

    fn log(&self, record: &log::Record<'_>) {
        // Dispatch manually instead of using methods provided by tracing-log
        // to avoid having fields "log.target = ..." in the log message,
        // which makes the log really hard to read.
        if self.enabled(record.metadata()) {
            dispatcher::get_default(|dispatch| {
                let (keys, meta) = loglevel_to_cs(record.level());

                dispatch.event(&Event::new(
                    meta,
                    &meta
                        .fields()
                        .value_set(&[(&keys.message, Some(record.args() as &dyn field::Value))]),
                ));
            });
        }
    }

    fn flush(&self) {}
}
pub fn logging(log_level: LevelFilter, json_output: bool) {
    // Calculate log_level
    let log_level = min(log_level, STATIC_MAX_LEVEL);

    let allowed_targets =
        (log_level != LevelFilter::Trace).then_some(["binstalk", "cargo_binstall"]);

    // Forward log to tracing
    Logger::init(log_level);

    // Build fmt subscriber
    let log_level = log_level.as_trace();
    let subscriber_builder = fmt().with_max_level(log_level);

    let subscriber: Box<dyn Subscriber + Send + Sync> = if json_output {
        Box::new(subscriber_builder.json().finish())
    } else {
        // Disable time, target, file, line_num, thread name/ids to make the
        // output more readable
        let subscriber_builder = subscriber_builder
            .without_time()
            .with_target(false)
            .with_file(false)
            .with_line_number(false)
            .with_thread_names(false)
            .with_thread_ids(false);

        // subscriber_builder defaults to writing to io::stdout(),
        // so test whether stdout supports color.
        let stdout_supports_color = supports_color_on_stream(Stdout)
            .map(|color_level| color_level.has_basic)
            .unwrap_or_default();

        Box::new(subscriber_builder.with_ansi(stdout_supports_color).finish())
    };

    // Build layer for filtering
    let filter_layer = allowed_targets.map(|allowed_targets| {
        Targets::new().with_targets(allowed_targets.into_iter().zip(repeat(log_level)))
    });

    // Build final subscriber with filtering
    let subscriber = subscriber.with(filter_layer);

    // Setup global subscriber
    set_global_default(subscriber).unwrap();
}
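For reference, a minimal sketch of how this entry point might be driven (the real call site in cargo-binstall's main is not shown here, so the surrounding setup below is hypothetical). `logging` installs both the global `log` logger and the global `tracing` subscriber, and both installations `unwrap()`, so it must be called exactly once, early, before any log or tracing macros fire.

use log::LevelFilter;

fn main() {
    // Hypothetical values; in cargo-binstall these come from the parsed CLI args.
    let log_level = LevelFilter::Info;
    let json_output = false;

    // Assumes `logging` (defined above) is in scope, e.g. via `mod logging;` + `use`.
    logging(log_level, json_output);

    tracing::info!("tracing events and forwarded log records now share one subscriber");
    log::warn!("this record is routed through the Logger defined above");
}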