// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use std::fmt::Debug;
use std::str::FromStr;

use deno_core::unsync::MaskFutureAsSend;
#[cfg(tokio_unstable)]
use tokio_metrics::RuntimeMonitor;

/// Default configuration for tokio. In the future, this method may have different defaults
/// depending on the platform and/or CPU layout.
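///
/// Returns `(event_interval, global_queue_interval, max_io_events_per_tick)`,
/// in the order consumed by `create_basic_runtime` below.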
const fn tokio_configuration() -> (u32, u32, usize) {
  (61, 31, 1024)
}
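
/// Reads an override for a tokio tunable from the environment, falling back to
/// `default` when the variable is unset. For example, running with
/// `DENO_TOKIO_EVENT_INTERVAL=31` set makes
/// `tokio_env("DENO_TOKIO_EVENT_INTERVAL", 61)` return `31`. Note that a value
/// that is set but fails to parse panics via `unwrap()` rather than falling
/// back to the default.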
fn tokio_env<T: FromStr>(name: &'static str, default: T) -> T
where
  <T as FromStr>::Err: Debug,
{
  match std::env::var(name) {
    Ok(value) => value.parse().unwrap(),
    Err(_) => default,
  }
}
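
/// Creates a single-threaded (current-thread) tokio runtime with the IO and
/// time drivers enabled and the scheduler tunables above applied, each
/// overridable at startup through its `DENO_TOKIO_*` environment variable.
///
/// A minimal usage sketch (hypothetical caller, not code from this module):
///
/// ```ignore
/// let rt = create_basic_runtime();
/// let answer = rt.block_on(async { 6 * 7 });
/// assert_eq!(answer, 42);
/// ```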
pub fn create_basic_runtime() -> tokio::runtime::Runtime {
  let (event_interval, global_queue_interval, max_io_events_per_tick) =
    tokio_configuration();

  tokio::runtime::Builder::new_current_thread()
    .enable_io()
    .enable_time()
    .event_interval(tokio_env("DENO_TOKIO_EVENT_INTERVAL", event_interval))
    .global_queue_interval(tokio_env(
      "DENO_TOKIO_GLOBAL_QUEUE_INTERVAL",
      global_queue_interval,
    ))
    .max_io_events_per_tick(tokio_env(
      "DENO_TOKIO_MAX_IO_EVENTS_PER_TICK",
      max_io_events_per_tick,
    ))
    // This limits the number of threads for blocking operations (like for
    // synchronous fs ops) or CPU bound tasks like when we run dprint in
    // parallel for deno fmt.
    // The default value is 512, which is an unhelpfully large thread pool. We
    // don't ever want to have more than a couple dozen threads.
    .max_blocking_threads(if cfg!(windows) {
      // On windows, tokio drives child process IO with blocking tasks: each
      // poll of a child's stdio spawns a blocking task that may block until
      // data is available (https://github.com/tokio-rs/tokio/pull/4824), so a
      // workload that keeps many subprocesses alive (e.g. `next build`
      // spawning 2 * the core count of deno child processes) can saturate a
      // small pool and starve other blocking work such as async fs ops.
      // Scale with the core count so other tasks can still make progress
      // (see #26465).
      4 * std::thread::available_parallelism()
        .map(|n| n.get())
        .unwrap_or(8)
    } else {
      32
    })
    .build()
    .unwrap()
}
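
/// Spawns `future` onto a freshly created current-thread runtime, drives it to
/// completion, and then forcibly shuts the runtime down. When compiled with
/// `--cfg tokio_unstable` and called with `metrics_enabled` set, tokio runtime
/// metrics are periodically dumped to stderr while the future runs.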
#[inline(always)]
fn create_and_run_current_thread_inner<F, R>(
  future: F,
  metrics_enabled: bool,
) -> R
where
  F: std::future::Future<Output = R> + 'static,
  R: Send + 'static,
{
  let rt = create_basic_runtime();

  // Since this is the main future, we want to box it in debug mode because it
  // tends to be fairly large and the compiler won't optimize repeated copies.
  // We also make this runtime factory function #[inline(always)] to avoid
  // holding the unboxed, unused future on the stack.

  #[cfg(debug_assertions)]
  // SAFETY: this is guaranteed to be running on a current-thread executor
  let future = Box::pin(unsafe { MaskFutureAsSend::new(future) });

  #[cfg(not(debug_assertions))]
  // SAFETY: this is guaranteed to be running on a current-thread executor
  let future = unsafe { MaskFutureAsSend::new(future) };

  #[cfg(tokio_unstable)]
  let join_handle = if metrics_enabled {
    rt.spawn(async move {
      let metrics_interval: u64 = std::env::var("DENO_TOKIO_METRICS_INTERVAL")
        .ok()
        .and_then(|val| val.parse().ok())
        .unwrap_or(1000);
      let handle = tokio::runtime::Handle::current();
      let runtime_monitor = RuntimeMonitor::new(&handle);
      tokio::spawn(async move {
        #[allow(clippy::print_stderr)]
        for interval in runtime_monitor.intervals() {
          eprintln!("{:#?}", interval);
          // wait for the configured interval (default 1000ms) before the
          // next metrics dump
          tokio::time::sleep(std::time::Duration::from_millis(
            metrics_interval,
          ))
          .await;
        }
      });
      future.await
    })
  } else {
    rt.spawn(future)
  };

  #[cfg(not(tokio_unstable))]
  let join_handle = rt.spawn(future);

  let r = rt.block_on(join_handle).unwrap().into_inner();
  // Forcefully shut down the runtime - we're done executing JS code at this
  // point, but there might be outstanding blocking tasks that were created and
  // later "unrefed". They won't terminate on their own, so we force the Tokio
  // runtime to terminate at this point.
  rt.shutdown_background();
  r
}
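
/// Runs `future` to completion on a dedicated current-thread tokio runtime,
/// with metrics collection disabled.
///
/// A hypothetical usage sketch:
///
/// ```ignore
/// let result = create_and_run_current_thread(async { 1 + 1 });
/// assert_eq!(result, 2);
/// ```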
#[inline(always)]
pub fn create_and_run_current_thread<F, R>(future: F) -> R
where
  F: std::future::Future<Output = R> + 'static,
  R: Send + 'static,
{
  create_and_run_current_thread_inner(future, false)
}
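
/// Like [`create_and_run_current_thread`], but dumps tokio runtime metrics to
/// stderr when the `DENO_TOKIO_METRICS` environment variable is set. The dump
/// interval defaults to 1000ms and can be tuned with
/// `DENO_TOKIO_METRICS_INTERVAL` (milliseconds); both variables only have an
/// effect in builds compiled with `--cfg tokio_unstable`.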
#[inline(always)]
pub fn create_and_run_current_thread_with_maybe_metrics<F, R>(future: F) -> R
where
  F: std::future::Future<Output = R> + 'static,
  R: Send + 'static,
{
  let metrics_enabled = std::env::var("DENO_TOKIO_METRICS").ok().is_some();
  create_and_run_current_thread_inner(future, metrics_enabled)
}